| commit | subject | old_file | new_file | old_contents | lang | proba | diff |
|---|---|---|---|---|---|---|---|
| stringlengths 40-40 | stringlengths 1-3.25k | stringlengths 4-311 | stringlengths 4-311 | stringlengths 0-26.3k | stringclasses 3 values | float64 0-1 | stringlengths 0-7.82k |
bcf4c6be490b96230877d9388c27783abdbc487e
|
Fix airbrake
|
lib/ansible/modules/extras/monitoring/airbrake_deployment.py
|
lib/ansible/modules/extras/monitoring/airbrake_deployment.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2013 Bruce Pennypacker <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: airbrake_deployment
version_added: "1.2"
author: "Bruce Pennypacker (@bpennypacker)"
short_description: Notify airbrake about app deployments
description:
- Notify airbrake about app deployments (see http://help.airbrake.io/kb/api-2/deploy-tracking)
options:
token:
description:
- API token.
required: true
environment:
description:
- The airbrake environment name, typically 'production', 'staging', etc.
required: true
user:
description:
- The username of the person doing the deployment
required: false
repo:
description:
- URL of the project repository
required: false
revision:
description:
- A hash, number, tag, or other identifier showing what revision was deployed
required: false
url:
description:
- Optional URL to submit the notification to. Use to send notifications to Airbrake-compliant tools like Errbit.
required: false
default: "https://airbrake.io/deploys.txt"
version_added: "1.5"
validate_certs:
description:
- If C(no), SSL certificates for the target url will not be validated. This should only be used
on personally controlled sites using self-signed certificates.
required: false
default: 'yes'
choices: ['yes', 'no']
requirements: []
'''
EXAMPLES = '''
- airbrake_deployment: token=AAAAAA
environment='staging'
user='ansible'
revision=4.2
'''
import urllib
# ===========================================
# Module execution.
#
def main():
module = AnsibleModule(
argument_spec=dict(
token=dict(required=True),
environment=dict(required=True),
user=dict(required=False),
repo=dict(required=False),
revision=dict(required=False),
url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
validate_certs=dict(default='yes', type='bool'),
),
supports_check_mode=True
)
# build list of params
params = {}
if module.params["environment"]:
params["deploy[rails_env]"] = module.params["environment"]
if module.params["user"]:
params["deploy[local_username]"] = module.params["user"]
if module.params["repo"]:
params["deploy[scm_repository]"] = module.params["repo"]
if module.params["revision"]:
params["deploy[scm_revision]"] = module.params["revision"]
params["api_key"] = module.params["token"]
url = module.params.get('url')
# If we're in check mode, just exit pretending like we succeeded
if module.check_mode:
module.exit_json(changed=True)
# Send the data to airbrake
data = urllib.urlencode(params)
response, info = fetch_url(module, url, data=data)
if info['status'] == 200:
module.exit_json(changed=True)
else:
module.fail_json(msg="HTTP result code: %d connecting to %s" % (info['status'], url))
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *
main()
|
Python
| 0.00005 |
@@ -2443,32 +2443,45 @@
ct(required=True
+, no_log=True
),%0A e
|
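Decoded (the `%0A` escapes are newlines), the hunk above adds `no_log=True` to the token option, so Ansible masks the Airbrake API key in logs and console output. A rough sketch of the patched spec:

```python
# Patched argument_spec: no_log=True keeps the API token out of Ansible output.
argument_spec = dict(
    token=dict(required=True, no_log=True),
    environment=dict(required=True),
    user=dict(required=False),
    repo=dict(required=False),
    revision=dict(required=False),
    url=dict(required=False, default='https://api.airbrake.io/deploys.txt'),
    validate_certs=dict(default='yes', type='bool'),
)
```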
8cbce567c8dc6c78a48972f7918897056a99a854
|
fix to_3d
|
gdsfactory/export/to_3d.py
|
gdsfactory/export/to_3d.py
|
from typing import Optional, Tuple
import shapely
from gdsfactory.component import Component
from gdsfactory.layers import LayerColors
from gdsfactory.pdk import get_active_pdk, get_layer_colors, get_layer_stack
from gdsfactory.tech import LayerStack
from gdsfactory.types import Layer
def to_3d(
component: Component,
layer_colors: Optional[LayerColors] = None,
layer_stack: Optional[LayerStack] = None,
exclude_layers: Optional[Tuple[Layer, ...]] = None,
):
"""Return Component 3D trimesh Scene.
Args:
component: to extrude in 3D.
layer_colors: layer colors from Klayout Layer Properties file.
Defaults to active PDK.layer_colors.
layer_stack: contains thickness and zmin for each layer.
Defaults to active PDK.layer_stack.
exclude_layers: layers to exclude.
"""
try:
import matplotlib.colors
from trimesh.creation import extrude_polygon
from trimesh.scene import Scene
except ImportError as e:
print("you need to `pip install trimesh`")
raise e
layer_colors = layer_colors or get_layer_colors()
layer_stack = layer_stack or get_layer_stack()
scene = Scene()
layer_to_thickness = layer_stack.get_layer_to_thickness()
layer_to_zmin = layer_stack.get_layer_to_zmin()
exclude_layers = exclude_layers or ()
has_polygons = False
for layer, polygons in component.get_polygons(by_spec=True).items():
if (
layer not in exclude_layers
and layer in layer_to_thickness
and layer in layer_to_zmin
):
height = layer_to_thickness[layer]
zmin = layer_to_zmin[layer]
layer_color = layer_colors.get_from_tuple(layer)
color_hex = layer_color.color
color_rgb = matplotlib.colors.to_rgb(color_hex)
for polygon in polygons:
p = shapely.geometry.Polygon(polygon.points)
mesh = extrude_polygon(p, height=height)
mesh.apply_translation((0, 0, zmin))
mesh.visual.face_colors = (*color_rgb, 0.5)
scene.add_geometry(mesh)
has_polygons = True
if not has_polygons:
raise ValueError(
f"{component.name!r} does not have polygons defined in the "
"layer_stack or layer_colors for the active Pdk {get_active_pdk().name!r}"
)
return scene
if __name__ == "__main__":
import gdsfactory as gf
c = gf.components.taper_strip_to_ridge()
# c = gf.components.straight()
s = to_3d(c)
s.show()
|
Python
| 0.00156 |
@@ -1434,16 +1434,32 @@
pec=True
+, as_array=False
).items(
|
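Decoded, the hunk passes `as_array=False` to `get_polygons`, so the loop receives polygon objects that still carry a `.points` attribute instead of bare arrays, keeping the `shapely.geometry.Polygon(polygon.points)` call valid. The patched loop header:

```python
# as_array=False returns polygon objects (with .points) rather than plain numpy arrays.
for layer, polygons in component.get_polygons(by_spec=True, as_array=False).items():
    ...
```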
9621de820ccbdd12a42bb4e4ff2f228ed245ee2e
|
Set velocity to maximum Maxon EC45 velocity.
|
epos_control_server.py
|
epos_control_server.py
|
#!/usr/bin/python
import logging.config
import signal
import threading
from flask import Flask, send_from_directory
from flask.ext.socketio import SocketIO
from epos_lib_wrapper import EposLibWrapper
from position_fetcher import PositionFetcher
POSITION_MAX_DELTA_TO_END = 0
EPOS_RELATIVE_POSITION = 20000000
EPOS_VELOCITY = 3000
# Instantiate Flask (Static files and REST API)
app = Flask(__name__)
# Instantiate SocketIO (Websockets, used for events) on top of it
socketio = SocketIO(app)
# EPOS2 control library
epos = None
# Position fetcher
position_fetch = None
# Watch position
watch_position = True
# Target position
target_position = 512
@app.route('/')
def index():
return send_from_directory('static', 'index.html')
@app.route('/js/<path:path>')
def static_js_proxy(path):
return send_from_directory('static/js/', path)
@socketio.on('moveTo', namespace='/servo')
def on_move_to(position):
global target_position
logging.error("Got move to %s", position)
target_position = position
@socketio.on('stop', namespace='/servo')
def on_stop():
stop()
def truncate_position(input_position):
try:
ret = int(input_position)
ret = min(ret, 1023)
ret = max(ret, 0)
return ret
except Exception:
return 512
def move_to(target_position):
position = truncate_position(target_position)
current_position, is_end = position_fetch.get_current_position()
if position < current_position and not (is_end and abs(position - current_position) < POSITION_MAX_DELTA_TO_END):
move_to_low()
elif position > current_position and not (is_end and abs(position - current_position) < POSITION_MAX_DELTA_TO_END):
move_to_high()
else:
logging.info("You asked me to move to %s, but position is %s, is_end: %s",
position, current_position, is_end)
stop()
def move_to_low():
logging.debug("Moving to lower")
epos.moveToPositionWithVelocity(-EPOS_RELATIVE_POSITION, EPOS_VELOCITY)
def move_to_high():
logging.debug("Moving to higher")
epos.moveToPositionWithVelocity(EPOS_RELATIVE_POSITION, EPOS_VELOCITY)
def stop():
logging.info("Stopping")
epos.stop()
def init_epos():
global epos
# Instantiate EPOS2 control library
epos = EposLibWrapper()
epos.openDevice()
def init_position_fetcher():
global position_fetch
position_fetch = PositionFetcher()
position_fetch.start()
def position_watcher():
while watch_position:
move_to(target_position)
logging.error("Position watcher stopped")
def sig_term_handler(signum, frame):
raise KeyboardInterrupt('Signal %i received!' % signum)
def main():
global watch_position
# Initialize logger
logging.config.fileConfig('log.ini')
try:
# Set signal handler for Shutdown
signal.signal(signal.SIGTERM, sig_term_handler)
init_position_fetcher()
init_epos()
watcher_thread = threading.Thread(target=position_watcher)
watcher_thread.start()
# Blocking! - Start Flask server
socketio.run(app, host='0.0.0.0')
except KeyboardInterrupt:
pass
finally:
if position_fetch:
position_fetch.stop()
watch_position = False
if __name__ == '__main__':
main()
|
Python
| 0 |
@@ -327,11 +327,11 @@
Y =
-300
+484
0%0A%0A#
|
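Decoded, the hunk changes a single constant: `EPOS_VELOCITY` goes from 3000 to 4840, the Maxon EC45 maximum named in the commit subject:

```python
EPOS_VELOCITY = 4840  # was 3000; 4840 is the Maxon EC45 maximum
```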
fe4c426fe6384b570bcc2a105bdf04f2f412a31f
|
Use Query.executeQuery for filterCasts.py
|
InformationScripting/scripts/filterCasts.py
|
InformationScripting/scripts/filterCasts.py
|
# filterCasts
casts = Query.ast(["-t=CastExpression"] + Query.args, [])
castTypeAttributes = Query.attribute(["-at=castType", "-s=of"], casts)
classUses = Query.uses(["-s=of", "-t=Class"], castTypeAttributes)
def hasTypeIdMethod( cl ):
for method in cl.methods:
if method.name == "typeIdStatic":
return True
return False
for tuple in classUses[0].tuples("uses"):
if hasTypeIdMethod(tuple.used):
values = [("ast", tuple.user)]
Query.result.add(Tuple(values))
Query.result = Query.toParent(["-t=CastExpression", "-addAs=node"], [Query.result])[0]
|
Python
| 0 |
@@ -9,19 +9,23 @@
Casts%0A%0Ac
+l
as
-t
+sUse
s = Quer
@@ -30,187 +30,99 @@
ery.
-ast(%5B%22-t=CastExpression%22%5D + Query.args, %5B%5D)%0AcastTypeAttributes = Query.attribute(%5B%22-at=castType%22, %22-s=of%22%5D, casts)%0AclassUses = Query.uses(%5B%22-s=of%22, %22-t=Class%22%5D, castTypeAttributes
+executeQuery('ast -t=CastExpression%7Cattribute -at=castType -input%7Cuses -input -t=Class', %5B%5D
)%0A%0Ad
|
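Unescaped (`%7C` is `|`, `%5B%5D` is `[]`), the diff collapses the three chained calls into one piped `executeQuery` string; the patched script opens roughly with:

```python
# One piped query replaces the separate ast/attribute/uses calls.
classUses = Query.executeQuery(
    'ast -t=CastExpression|attribute -at=castType -input|uses -input -t=Class', [])
```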
69613f33b7b950dd536a5c4322214fbeaee89013
|
fix bug
|
generate_tests0000-0999.py
|
generate_tests0000-0999.py
|
from isochrones.dartmouth import Dartmouth_Isochrone
from isochrones.utils import addmags
import numpy as np
import pandas as pd
file = open('/tigress/np5/true_params.txt','a')
def get_index(n):
if n < 10:
index = '000' + str(n)
elif n < 100:
index = '00' + str(n)
elif n < 1000:
index = '0' + str(n)
else:
index = str(n)
for n in range(0,1000,1):
index = get_index(n)
file.write('test: ' + index + '\n')
dar = Dartmouth_Isochrone()
array = np.random.rand(2) + 0.5
if array[0] > array[1]:
M1 = array[0]
M2 = array[1]
else:
M1 = array[1]
M2 = array[0]
age1 = np.log10(1e8)
age2 = np.log10(5e8)
feh1 = 0.0
array = 1400*np.random.rand(2) + 100
if array[0] > array[1]:
distance1 = array[0]
distance2 = array[1]
else:
distance1 = array[1]
distance2 = array[0]
AV1 = 0.0
feh2 = 0.2
AV2 = 0.1
params = (M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2)
params = str(params)
file.write('(M1,M2,age1,age2,feh1,feh2,distance1,distance2,AV1,AV2) = ' + params + '\n')
file.write('\n')
#Simulate true magnitudes
unresolved_bands = ['J','H','K']
resolved_bands = ['i','K']
args1 = (age1, feh1, distance1, AV1)
args2 = (age2, feh2, distance2, AV2)
unresolved = {b:addmags(dar.mag[b](M1, *args1), dar.mag[b](M2, *args2)) for b in unresolved_bands}
resolved_1 = {b:dar.mag[b](M1, *args1) for b in resolved_bands}
resolved_2 = {b:dar.mag[b](M2, *args2) for b in resolved_bands}
#print dar.mag['K'](M2, *args2)
#print unresolved, resolved_1, resolved_2
instruments = ['twomass','RAO']
bands = {'twomass':['J','H','K'],
'RAO':['i','K']}
mag_unc = {'twomass': 0.02, 'RAO':0.1}
resolution = {'twomass':4.0, 'RAO':0.1}
relative = {'twomass':False, 'RAO':True}
separation = 0.5
PA = 100.
columns = ['name', 'band', 'resolution', 'relative', 'separation', 'pa', 'mag', 'e_mag']
df = pd.DataFrame(columns=columns)
i=0
for inst in ['twomass']: #Unresolved observations
for b in bands[inst]:
row = {}
row['name'] = inst
row['band'] = b
row['resolution'] = resolution[inst]
row['relative'] = relative[inst]
row['separation'] = 0.
row['pa'] = 0.
row['mag'] = unresolved[b]
row['e_mag'] = mag_unc[inst]
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
for inst in ['RAO']: #Resolved observations
for b in bands[inst]:
mags = [resolved_1[b], resolved_2[b]]
pas = [0, PA]
seps = [0., separation]
for mag,sep,pa in zip(mags,seps,pas):
row = {}
row['name'] = inst
row['band'] = b
row['resolution'] = resolution[inst]
row['relative'] = relative[inst]
row['separation'] = sep
row['pa'] = pa
row['mag'] = mag
row['e_mag'] = mag_unc[inst]
df = df.append(pd.DataFrame(row, index=[i]))
i += 1
#print df
df.to_csv(path_or_buf='/tigress/np5/df_binary_test{}.csv'.format(index))
file.close()
|
Python
| 0.000001 |
@@ -209,31 +209,30 @@
10:%0A
-index =
+return
'000' + str
@@ -261,23 +261,22 @@
-index =
+return
'00' +
@@ -309,23 +309,22 @@
-index =
+return
'0' + s
@@ -347,23 +347,22 @@
-index =
+return
str(n)%0A
|
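The bug being fixed: `get_index` assigned to a local `index` but never returned it, so every call produced `None`. Decoded, the patched function is:

```python
def get_index(n):
    # Zero-pad n to four characters; each branch now returns instead of assigning.
    if n < 10:
        return '000' + str(n)
    elif n < 100:
        return '00' + str(n)
    elif n < 1000:
        return '0' + str(n)
    else:
        return str(n)
```

(The idiomatic one-liner would be `str(n).zfill(4)`, but the commit keeps the branchy form.)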
866f9cbe01e360872e0b7f55b00f2683adffaabc
|
Fix typo
|
ckanext/mapactiontheme/controllers/admin_controller.py
|
ckanext/mapactiontheme/controllers/admin_controller.py
|
from ckan.controllers.admin import AdminController
import ckan.lib.base as base
import ckan.lib.helpers as h
import ckan.model as model
import ckan.logic as logic
from ckan.lib.base import BaseController
from ckan.plugins.toolkit import c, request, _
from ckan.authz import has_user_permission_for_group_or_org
class CustomAdminController(BaseController):
def __before__(self, action, **params):
super(CustomAdminController, self).__before__(action, **params)
context = {'model': model,
'user': c.user, 'auth_user_obj': c.userobj}
if action == u"trash" and c.user:
# 'delete_dataset' is a permission that only
# org `editor` or `admin` has
if has_user_permission_for_group_or_org('mapaction',
c.user,
'delete_dataset'):
context['ignore_auth'] = True
try:
logic.check_access('sysadmin', context, {})
except logic.NotAuthorized:
base.abort(403, _(
'Need to be system administrator to administer'))
else:
x = AdminController()
x.__before__(action, **params)
def trash(self):
c.deleted_revisions = model.Session.query(
model.Revision).filter_by(state=model.State.DELETED)
c.deleted_packages = model.Session.query(
model.Package).filter_by(state=model.State.DELETED)
if not request.params or (len(request.params) == 1 and '__no_cache__'
in request.params):
return base.render('admin/trash.html')
else:
# NB: we repeat retrieval of of revisions
# this is obviously inefficient (but probably not *that* bad)
# but has to be done to avoid (odd) sqlalchemy errors (when doing
# purge packages) of form: "this object already exists in the
# session"
msgs = []
if ('purge-packages' in request.params) or ('purge-revisions' in
request.params):
if 'purge-packages' in request.params:
revs_to_purge = []
for pkg in c.deleted_packages:
revisions = [x[0] for x in pkg.all_related_revisions]
# ensure no accidental purging of other(non-deleted)
# packages initially just avoided purging revisions
# where non-deleted packages were affected
# however this lead to confusing outcomes e.g.
# we successfully deleted revision in which package
# was deleted (so package now active again) but no
# other revisions
problem = False
for r in revisions:
affected_pkgs = set(r.packages).\
difference(set(c.deleted_packages))
if affected_pkgs:
msg = _('Cannot purge package %s as '
'associated revision %s includes '
'non-deleted packages %s')
msg = msg % (pkg.id, r.id, [pkg.id for r
in affected_pkgs])
msgs.append(msg)
problem = True
break
if not problem:
revs_to_purge += [r.id for r in revisions]
model.Session.remove()
else:
revs_to_purge = [rev.id for rev in c.deleted_revisions]
revs_to_purge = list(set(revs_to_purge))
for id in revs_to_purge:
revision = model.Session.query(model.Revision).get(id)
try:
# TODO deleting the head revision corrupts the edit
# page Ensure that whatever 'head' pointer is used
# gets moved down to the next revision
model.repo.purge_revision(revision, leave_record=False)
except Exception, inst:
msg = _('Problem purging revision %s: %s') % (id, inst)
msgs.append(msg)
h.flash_success(_('Purge complete'))
else:
msgs.append(_('Action not implemented.'))
for msg in msgs:
h.flash_error(msg)
h.redirect_to(controller='admin', action='trash')
|
Python
| 0.999999 |
@@ -1751,19 +1751,16 @@
eval of
-of
revision
|
f5c94105f6652186e05ebe201f127a1c8b7bd94c
|
add script to download and save articles
|
newsplease/tests/downloadarticles.py
|
newsplease/tests/downloadarticles.py
|
import json
import os

from newsplease import NewsPlease  # missing import; NewsPlease is used below
name = 'trump-in-saudi-arabia.txt'
basepath = '/Users/felix/Downloads/'
download_dir = basepath + 'dir' + name + '/'
os.makedirs(download_dir)
articles = NewsPlease.download_from_file(basepath + name)
for url in articles:
article = articles[url]
with open(download_dir + article['filename'], 'w') as outfile:
json.dump(article, outfile)
|
Python
| 0 |
@@ -372,8 +372,9 @@
utfile)%0A
+%0A
|
09441298ca9a170d166d61502eaa0b0f29358f4a
|
Fix for threshold outside testrun
|
PreProcess.py
|
PreProcess.py
|
import numpy as np
import sys, os
import nrrd
if (len(sys.argv) < 3):
print 'Error: missing arguments!'
print 'e.g. python PreProcess.py image_ch1.nrrd image_ch2.nrrd [C #]' # t or tC = test run with no save.
else:
print 'Processing %s and %s...'% (str(sys.argv[1]), str(sys.argv[2]))
data1, header1 = nrrd.read(str(sys.argv[1]))
data2, header2 = nrrd.read(str(sys.argv[2]))
size = np.array(data1.shape) -1
print 'Image size is %s pixels.'% str(data2.shape)
c = -1
if (len(sys.argv) > 3):
if ('T' in str(sys.argv[3]).upper()):
testrun = True
print 'Test run...'
if ('C' in str(sys.argv[3]).upper()):
try:
c = int(sys.argv[4])
except ValueError:
print 'Problem with given clipping threshold (must be an integer) using 0'
c = 0
else:
testrun = False
s = 2
h = s + 1
d = 10
d1 = int(round(size[0] / s))
d2 = int(round(size[1] / s))
d3 = int(round(size[2] / h))
Rs1 = np.zeros([s])
Rs2 = np.zeros([s])
Rs1[0] = np.sum(data1[d1-d:d1+d,d2-d:d2+d,0:d3])
Rs1[1] = np.sum(data1[d1-d:d1+d,d2-d:d2+d,s*d3:])
Rs2[0] = np.sum(data2[d1-d:d1+d,d2-d:d2+d,0:d3])
Rs2[1] = np.sum(data2[d1-d:d1+d,d2-d:d2+d,s*d3:])
if testrun:
print 'Results:'
print Rs1
print Rs2
# Clipping edges below threshold value c
if (c > -1):
Cl = 0
Cr = size[0]-1
Ct = 0
Cb = size[1]-1
Cv = 0
Cd = size[2]-1
#left
for y in range(1,d1):
if ((np.max(data1[0:y,0:,0:]) < c ) and (np.max(data2[0:y,0:,0:]) < c )):
Cl = y
else:
if testrun: print Cl
break
#right
for y in range(size[0],d1,-1):
if ((np.max(data1[y:,0:,0:]) < c ) and (np.max(data2[y:,0:,0:]) < c )):
Cr = y
else:
if testrun: print Cr
break
#top / anterior
for x in range(1,d2):
if ((np.max(data1[0:,0:x,0:]) < c ) and (np.max(data2[0:,0:x,0:]) < c )):
Ct = x
else:
if testrun: print Ct
break
#bottom / posterior
for x in range(size[1],d2,-1):
if ((np.max(data1[0:,x:,0:]) < c ) and (np.max(data2[0:,x:,0:]) < c )):
Cb = x
else:
if testrun: print Cb
break
#ventral
for z in range(1,d3):
if ((np.max(data1[0:,0:,0:z]) < c ) and (np.max(data2[0:,0:,0:z]) < c )):
Cv = z
else:
if testrun: print Cv
break
#dorsal
for z in range(size[2],d3,-1):
if ((np.max(data1[0:,0:,z:]) < c ) and (np.max(data2[0:,0:,z:]) < c )):
Cd = z
else:
if testrun: print Cd
break
data1c =data1[Cl:Cr,Ct:Cb,Cv:Cd]
data2c =data2[Cl:Cr,Ct:Cb,Cv:Cd]
if testrun:
print 'Clipping both images from %s to %s...'% (str(data1.shape), str(data1c.shape))
print 'Results saved to ClippedImageTestC[1,2].nrrd as test run...'
nrrd.write('ClippedImageTestC1.nrrd', data1c, options=header1)
nrrd.write('ClippedImageTestC2.nrrd', data2c, options=header2)
else:
print 'Clipping both images from %s to %s and saving...'% (str(data1.shape), str(data1c.shape))
nrrd.write(str(sys.argv[1]), data1c, options=header1)
nrrd.write(str(sys.argv[2]), data2c, options=header2)
data1 = data1c
data2 = data2c
if ( ((Rs2[0] > Rs2[1]) and (np.sum(Rs1) <= (1.5 * np.sum(Rs2)))) or ((Rs1[0] > Rs1[1]) and (np.sum(Rs1) > (1.5 * np.sum(Rs2))))):
print 'Flip required in Z axis'
data1 = np.flipud(data1)
data2 = np.flipud(data2)
if not testrun:
print 'Saving result to %s...'% str(sys.argv[1])
nrrd.write(str(sys.argv[1]), data1, options=header1)
print 'Saving result to %s...'% str(sys.argv[2])
nrrd.write(str(sys.argv[2]), data2, options=header2)
print 'Files saved - OK'
else:
print 'Changes not saved as just a test run.'
if (np.sum(Rs1) > (1.5 * np.sum(Rs2))): #1.5 times bias required to swap from default
print 'BG: C1\nSG: C2'
if not testrun:
os.rename(str(sys.argv[1]),str(sys.argv[1]).replace('_C1','_BG'))
os.rename(str(sys.argv[2]),str(sys.argv[2]).replace('_C2','_SG'))
print 'Files renamed - OK'
else:
print 'Changes not saved as just a test run.'
else:
print 'BG: C2\nSG: C1'
if not testrun:
os.rename(str(sys.argv[1]),str(sys.argv[1]).replace('_C1','_SG'))
os.rename(str(sys.argv[2]),str(sys.argv[2]).replace('_C2','_BG'))
print 'Files renamed - OK'
else:
print 'Changes not saved as just a test run.'
print 'Done.'
|
Python
| 0.000001 |
@@ -511,24 +511,49 @@
c = -1%0A %0A
+ testrun = False%0A %0A
if (len(
|
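Decoded, the fix initializes `testrun` before the argument check: previously it was set only when a 'T' flag was present or when no extra argument was given at all, so e.g. `python PreProcess.py a.nrrd b.nrrd C 5` reached the later `if testrun:` with the name undefined. The patched block (the file is Python 2):

```python
c = -1

testrun = False  # defined up front, so 'C #' without 'T' no longer raises NameError

if (len(sys.argv) > 3):
    if ('T' in str(sys.argv[3]).upper()):
        testrun = True
        print 'Test run...'
```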
f6686169cf7344e0c75c6d060332d3692fc7df1c
|
Update curation table format
|
bin/trait_mapping/create_table_for_manual_curation.py
|
bin/trait_mapping/create_table_for_manual_curation.py
|
#!/usr/bin/env python3
import argparse
from eva_cttv_pipeline.trait_mapping.ols import (
get_ontology_label_from_ols, is_current_and_in_efo, is_in_efo,
)
def find_previous_mapping(trait_name, previous_mappings):
if trait_name not in previous_mappings:
return ''
uri = previous_mappings[trait_name]
label = get_ontology_label_from_ols(uri)
uri_is_current_and_in_efo = is_current_and_in_efo(uri)
uri_in_efo = is_in_efo(uri)
if uri_in_efo:
trait_status = 'EFO_CURRENT' if uri_is_current_and_in_efo else 'EFO_OBSOLETE'
else:
trait_status = 'NOT_CONTAINED'
trait_string = '|'.join([uri, label, 'NOT_SPECIFIED', 'previously-used', trait_status])
return trait_string
def find_exact_mapping(trait_name, mappings):
for mapping in mappings:
if mapping.lower().split('|')[1] == trait_name:
return mapping
return ''
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'-t', '--traits-for-curation',
help='Table with traits for which the pipeline failed to make a confident prediction')
parser.add_argument(
'-m', '--previous-mappings',
help='Table with all mappings previously issued by EVA')
parser.add_argument(
'-o', '--output',
help='Output TSV to be loaded in Google Sheets for manual curation')
args = parser.parse_args()
outfile = open(args.output, 'w')
# Load all previous mappings
previous_mappings = dict(l.rstrip().split('\t') for l in open(args.previous_mappings))
# Process all mappings which require manual curation
for line in open(args.traits_for_curation):
fields = line.rstrip().split('\t')
trait_name, trait_freq = fields[:2]
mappings = fields[2:]
previous_mapping = find_previous_mapping(trait_name, previous_mappings)
exact_mapping = find_exact_mapping(trait_name, mappings)
out_line = '\t'.join(
[trait_name, trait_freq,
# Mapping to use, if ready, comment, mapping URI, mapping label, whether exact, in EFO
'', '', '', '', '', '', '',
previous_mapping, exact_mapping] + mappings
) + '\n'
outfile.write(out_line)
outfile.close()
|
Python
| 0 |
@@ -2008,162 +2008,8 @@
req,
-%0A # Mapping to use, if ready, comment, mapping URI, mapping label, whether exact, in EFO%0A '', '', '', '', '', '', '',%0A
pre
|
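Decoded, the hunk drops the seven empty placeholder columns (and their explanatory comment) from each output row, leaving:

```python
out_line = '\t'.join(
    [trait_name, trait_freq,
     previous_mapping, exact_mapping] + mappings
) + '\n'
```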
7f48cdbf306a4cf59e975aa41ddfef663e366b77
|
Add test for non-empty database
|
tests/test_checker.py
|
tests/test_checker.py
|
import os
import sys
import stat
from random import randint, choice
from string import ascii_letters
import pytest
from botbot import checker, problems, checks, fileinfo
def create_random_directory_tree(ic, directory):
"""Create a directory tree with ic files in it (files and directories)"""
dp = directory
while ic:
name = ''.join(choice(ascii_letters) for _ in range(10))
if not randint(0, 3): # Make a file
dp.ensure(name)
ic -= 1
else:
dp = dp.mkdir(name)
# Tests for Checker class methods
def test_checker_register_accept_single_function(tmpdir):
c = checker.OneshotChecker(sys.stdout, tmpdir.join('test.db').strpath)
c.register(lambda: print("Hello world!"))
assert len(c.checks) == 1
def test_checker_register_accept_function_list(tmpdir):
c = checker.OneshotChecker(sys.stdout, tmpdir.join('test.db').strpath)
# Function list
f = list()
f.append(lambda : print("Hello world!"))
f.append(lambda i : i + i)
c.register(*f)
assert len(c.checks) == 2
def test_oneshotchecker_checked_file_processing(tmpdir):
c = checker.OneshotChecker(sys.stdout, tmpdir.join('test.db').strpath)
assert len(c.checked) == 0
c.process_checked_file({
"problems": {}
})
assert len(c.checked) == 1
def test_oneshotchecker_finds_all_files(tmpdir):
c = checker.OneshotChecker(sys.stdout, tmpdir.join('test.db').strpath)
for i in range(10, 20):
tdir = tmpdir.mkdir(str(i))
create_random_directory_tree(i, tdir)
c.build_new_checklist(tdir.strpath, verbose=False)
assert len(c.checklist) == i
def test_oneshot_checker_populate_list_empty_db(tmpdir):
_TMPDIR_CT = 20
td = tmpdir.mkdir('doot')
create_random_directory_tree(_TMPDIR_CT, td)
c = checker.OneshotChecker(sys.stdout, tmpdir.join('test.db').strpath)
c.populate_checklist(td.strpath)
assert c.checklist
# Tests for checking functions
def test_symlink_checker_same_directory(tmpdir):
prev = tmpdir.chdir()
f = tmpdir.join('file.txt')
f.write('')
os.symlink(f.basename, 'link')
fi = fileinfo.FileInfo('file.txt')
lin = fileinfo.FileInfo('link')
assert not checker.is_link(fi['path'])
assert checker.is_link(lin['path'])
prev.chdir()
def test_symlink_checker_link_in_lower_directory(tmpdir):
prev = tmpdir.chdir()
f = tmpdir.join('file.txt')
f.write('')
fi = fileinfo.FileInfo('file.txt')
os.mkdir('newdir')
os.symlink(os.path.join('..', 'file.txt'),
os.path.join('newdir', 'link'))
lin = fileinfo.FileInfo(os.path.join('newdir', 'link'))
assert checker.is_link(lin['path'])
assert not checker.is_link(fi['path'])
prev.chdir()
def test_is_fastq(tmpdir):
prev = tmpdir.chdir()
bad = tmpdir.join('bad.fastq')
bad.write('')
b = fileinfo.FileInfo('bad.fastq')
os.symlink(bad.basename, 'good.fastq')
g = fileinfo.FileInfo('good.fastq')
assert checks.is_fastq(b) == 'PROB_FILE_IS_FASTQ'
assert checks.is_fastq(g) is None
def test_sam_raw_file_detection(tmpdir):
prev = tmpdir.chdir()
bad = tmpdir.join('bad.sam')
bad.write('')
f = fileinfo.FileInfo('bad.sam')
# Check raw file
assert checks.sam_should_compress(f) == 'PROB_SAM_SHOULD_COMPRESS'
prev.chdir()
def test_sam_and_bam_detection(tmpdir):
prev = tmpdir.chdir()
sam = tmpdir.join('bad.sam')
sam.write('')
sami = fileinfo.FileInfo('bad.sam')
assert checks.sam_should_compress(sami) == 'PROB_SAM_SHOULD_COMPRESS'
bam = tmpdir.join('bad.bam')
bam.write('')
bami = fileinfo.FileInfo('bad.sam')
assert checks.sam_should_compress(bami) == 'PROB_SAM_AND_BAM_EXIST'
prev.chdir()
def test_is_large_plaintext_affirmative():
fi = {
'path': 'test.txt',
'lastmod': 0,
'size': 1000000000000000,
}
result = checks.is_large_plaintext(fi)
assert result == 'PROB_OLD_LARGE_PLAINTEXT'
def test_is_large_plaintext_negatory():
fi = {
'path': 'bad.sam',
'lastmod': 2 ** 32, # This test will work until 2038
'size': 100
}
result = checks.is_large_plaintext(fi)
assert not result
|
Python
| 0.000019 |
@@ -1949,16 +1949,452 @@
cklist%0A%0A
+def test_oneshot_checker_update_list_with_entries(tmpdir):%0A _TMPDIR_CT = 20%0A td = tmpdir.mkdir('doot')%0A tf = td.join('test.txt').ensure()%0A%0A c = checker.OneshotChecker(sys.stdout, tmpdir.join('test.db').strpath)%0A files = %5Bfileinfo.FileInfo(tf.strpath)%5D%0A c.db.store_file_problems(*files)%0A%0A c.update_checklist(files)%0A%0A assert c.checklist%0A%0Adef test_oneshot_checker_populate_list_with_non_empty_db(tmpdir):%0A pass%0A%0A
# Tests
|
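Decoded, the commit inserts two tests ahead of the '# Tests for checking functions' marker: one drives `update_checklist` against a database that already holds entries, the other is a placeholder for the non-empty populate case:

```python
def test_oneshot_checker_update_list_with_entries(tmpdir):
    _TMPDIR_CT = 20
    td = tmpdir.mkdir('doot')
    tf = td.join('test.txt').ensure()

    c = checker.OneshotChecker(sys.stdout, tmpdir.join('test.db').strpath)
    files = [fileinfo.FileInfo(tf.strpath)]
    c.db.store_file_problems(*files)

    c.update_checklist(files)

    assert c.checklist

def test_oneshot_checker_populate_list_with_non_empty_db(tmpdir):
    pass
```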
620210707477e6496ab665ec7df8afaf2ba408aa
|
consolidate test version conditional boilerplate into assertion method
|
tests/test_codegen.py
|
tests/test_codegen.py
|
"""
Part of the astor library for Python AST manipulation
License: 3-clause BSD
Copyright 2014 (c) Berker Peksag
"""
import ast
import sys
import textwrap
try:
import unittest2 as unittest
except ImportError:
import unittest
import astor
class CodegenTestCase(unittest.TestCase):
def assertAstSourceEqual(self, source):
self.assertEqual(astor.to_source(ast.parse(source)), source)
def test_imports(self):
source = "import ast"
self.assertAstSourceEqual(source)
source = "import operator as op"
self.assertAstSourceEqual(source)
source = "from math import floor"
self.assertAstSourceEqual(source)
def test_dictionary_literals(self):
source = "{'a': 1, 'b': 2}"
self.assertAstSourceEqual(source)
another_source = "{'nested': ['structures', {'are': 'important'}]}"
self.assertAstSourceEqual(another_source)
def test_try_expect(self):
source = textwrap.dedent("""\
try:
'spam'[10]
except IndexError:
pass""")
self.assertAstSourceEqual(source)
source = textwrap.dedent("""\
try:
'spam'[10]
except IndexError as exc:
sys.stdout.write(exc)""")
self.assertAstSourceEqual(source)
def test_del_statement(self):
source = "del l[0]"
self.assertAstSourceEqual(source)
source = "del obj.x"
self.assertAstSourceEqual(source)
def test_arguments(self):
source = textwrap.dedent("""\
j = [1, 2, 3]
def test(a1, a2, b1=j, b2='123', b3={}, b4=[]):
pass""")
self.assertAstSourceEqual(source)
def test_pass_arguments_node(self):
source = textwrap.dedent("""\
j = [1, 2, 3]
def test(a1, a2, b1=j, b2='123', b3={}, b4=[]):
pass""")
root_node = ast.parse(source)
arguments_node = [n for n in ast.walk(root_node)
if isinstance(n, ast.arguments)][0]
self.assertEqual(astor.to_source(arguments_node),
"a1, a2, b1=j, b2='123', b3={}, b4=[]")
def test_matrix_multiplication(self):
for source in ("(a @ b)", "a @= b"):
if sys.version_info >= (3, 5):
self.assertAstSourceEqual(source)
else:
# matrix multiplication operator introduced in Python 3.5
self.assertRaises(SyntaxError, ast.parse, source)
def test_multiple_unpackings(self):
source = textwrap.dedent("""\
my_function(*[1], *[2], **{'three': 3}, **{'four': 'four'})""")
if sys.version_info >= (3, 5):
self.assertAstSourceEqual(source)
else:
self.assertRaises(SyntaxError, ast.parse, source)
def test_async_def_with_for(self):
source = textwrap.dedent("""\
async def read_data(db):
async with connect(db) as db_cxn:
data = await db_cxn.fetch('SELECT foo FROM bar;')
async for datum in data:
if quux(datum):
return datum""")
if sys.version_info >= (3, 5):
self.assertAstSourceEqual(source)
else:
self.assertRaises(SyntaxError, ast.parse, source)
if __name__ == '__main__':
unittest.main()
|
Python
| 0.000024 |
@@ -395,32 +395,276 @@
rce)), source)%0A%0A
+ def assertAstSourceEqualIfAtLeastVersion(self, source, version_tuple):%0A if sys.version_info %3E= version_tuple:%0A self.assertAstSourceEqual(source)%0A else:%0A self.assertRaises(SyntaxError, ast.parse, source)%0A%0A
def test_imp
@@ -2493,245 +2493,64 @@
-if sys.version_info %3E= (3, 5):%0A self.assertAstSourceEqual(source)%0A else:%0A # matrix multiplication operator introduced in Python 3.5%0A self.assertRaises(SyntaxError, ast.parse, source
+self.assertAstSourceEqualIfAtLeastVersion(source, (3, 5)
)%0A%0A
@@ -2710,159 +2710,64 @@
-if sys.version_info %3E= (3, 5):%0A self.assertAstSourceEqual(source)%0A else:%0A self.assertRaises(SyntaxError, ast.parse, source
+self.assertAstSourceEqualIfAtLeastVersion(source, (3, 5)
)%0A%0A
@@ -3105,159 +3105,64 @@
-if sys.version_info %3E= (3, 5):%0A self.assertAstSourceEqual(source)%0A else:%0A self.assertRaises(SyntaxError, ast.parse, source
+self.assertAstSourceEqualIfAtLeastVersion(source, (3, 5)
)%0A%0A%0A
|
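Decoded, the new assertion helper centralizes the version gate, and each of the three Python 3.5 tests shrinks to a single `self.assertAstSourceEqualIfAtLeastVersion(source, (3, 5))` call:

```python
def assertAstSourceEqualIfAtLeastVersion(self, source, version_tuple):
    # Round-trip the source when the interpreter is new enough;
    # otherwise the syntax itself must fail to parse.
    if sys.version_info >= version_tuple:
        self.assertAstSourceEqual(source)
    else:
        self.assertRaises(SyntaxError, ast.parse, source)
```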
c5902af643d639ecefa756a0caaeeb58a7c6d151
|
Update P4_textToExcel working solution
|
books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P4_textToExcel.py
|
books/AutomateTheBoringStuffWithPython/Chapter12/PracticeProjects/P4_textToExcel.py
|
# Write a program to read in the contents of several text files (you can make
# the text files yourself) and insert those contents into a spreadsheet, with
# one line of text per row. The lines of the first text file will be in the
# cells of column A, the lines of the second text file will be in the cells of
# column B, and so on.
import openpyxl
# Open workbook
wb = openpyxl.Workbook()
sheet = wb.active
# Get list of files
# Open file
# Scan lines into list
# Transpose list into relevant workbook column
# Close file
# Save workbook
wb.save("textToExcel.xlsx")
|
Python
| 0 |
@@ -343,16 +343,49 @@
openpyxl
+%0Aimport os%0A%0AFOLDER = %22./p4files/%22
%0A%0A# Open
@@ -462,46 +462,189 @@
les%0A
-%0A# Open file%0A%0A# Scan lines into list%0A%0A
+filelist = os.listdir(FOLDER)%0Afilelist.sort()%0A%0A# Open file%0Afor file in filelist:%0A with open(FOLDER + file) as fileObj:%0A index = 1%0A for line in fileObj:%0A
# Tr
@@ -653,18 +653,18 @@
spose li
-st
+ne
into re
@@ -690,21 +690,120 @@
umn%0A
-%0A# Close file
+ sheet.cell(row=index, column=(filelist.index(file) + 1)).value = line.strip()%0A index += 1
%0A%0A#
|
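Applying the hunks to the skeleton yields the working solution; decoded roughly (the `./p4files/` folder constant comes straight from the diff):

```python
import openpyxl
import os

FOLDER = "./p4files/"

# Open workbook
wb = openpyxl.Workbook()
sheet = wb.active

# Get list of files
filelist = os.listdir(FOLDER)
filelist.sort()

# Open file
for file in filelist:
    with open(FOLDER + file) as fileObj:
        index = 1
        for line in fileObj:
            # Transpose line into relevant workbook column
            sheet.cell(row=index, column=(filelist.index(file) + 1)).value = line.strip()
            index += 1

# Save workbook
wb.save("textToExcel.xlsx")
```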
96adca2c21e37afd9a7c40d9aac7f0c6aa86ed84
|
Improve naming.
|
gitfs/views/passthrough.py
|
gitfs/views/passthrough.py
|
# Copyright 2014 PressLabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from fuse import FuseOSError
from errno import EACCES
from .view import View
STATS = ('st_atime', 'st_ctime', 'st_gid', 'st_mode', 'st_mtime', 'st_nlink',
'st_size', 'st_uid')
FS_STATS = ('f_bavail', 'f_bfree', 'f_blocks', 'f_bsize', 'f_favail',
'f_ffree', 'f_files', 'f_flag', 'f_frsize', 'f_namemax')
class PassthroughView(View):
def __init__(self, *args, **kwargs):
super(PassthroughView, self).__init__(*args, **kwargs)
self.root = kwargs['repo_path']
def _full_path(self, partial):
if partial.startswith("/"):
partial = partial[1:]
path = os.path.join(self.root, partial)
return path
def access(self, path, mode):
full_path = self._full_path(path)
if not os.access(full_path, mode):
raise FuseOSError(EACCES)
def chmod(self, path, mode):
full_path = self._full_path(path)
return os.chmod(full_path, mode)
def chown(self, path, uid, gid):
full_path = self._full_path(path)
return os.chown(full_path, uid, gid)
def getattr(self, path, fh=None):
full_path = self._full_path(path)
st = os.lstat(full_path)
return dict((key, getattr(st, key)) for key in STATS)
def readdir(self, path, fh):
full_path = self._full_path(path)
dirents = ['.', '..']
if os.path.isdir(full_path):
[dirents.append(entry)
for entry in os.listdir(full_path) if entry != '.git']
for directory in dirents:
yield directory
def readlink(self, path):
pathname = os.readlink(self._full_path(path))
if pathname.startswith("/"):
return os.path.relpath(pathname, self.root)
else:
return pathname
def mknod(self, path, mode, dev):
return os.mknod(self._full_path(path), mode, dev)
def rmdir(self, path):
return os.rmdir(self._full_path(path))
def mkdir(self, path, mode):
return os.mkdir(self._full_path(path), mode)
def statfs(self, path):
full_path = self._full_path(path)
stv = os.statvfs(full_path)
return dict((key, getattr(stv, key)) for key in FS_STATS)
def unlink(self, path):
return os.unlink(self._full_path(path))
def symlink(self, target, name):
return os.symlink(self._full_path(target), self._full_path(name))
def rename(self, old, new):
return os.rename(self._full_path(old), self._full_path(new))
def link(self, target, name):
return os.link(self._full_path(target), self._full_path(name))
def utimens(self, path, times=None):
return os.utime(self._full_path(path), times)
def open(self, path, flags):
full_path = self._full_path(path)
return os.open(full_path, flags)
def create(self, path, mode, fi=None):
full_path = self._full_path(path)
return os.open(full_path, os.O_WRONLY | os.O_CREAT, mode)
def read(self, path, length, offset, fh):
os.lseek(fh, offset, os.SEEK_SET)
return os.read(fh, length)
def write(self, path, buf, offset, fh):
os.lseek(fh, offset, os.SEEK_SET)
return os.write(fh, buf)
def truncate(self, path, length, fh=None):
full_path = self._full_path(path)
with open(full_path, 'r+') as f:
f.truncate(length)
def flush(self, path, fh):
return os.fsync(fh)
def release(self, path, fh):
return os.close(fh)
def fsync(self, path, fdatasync, fh):
return os.fsync(fh)
|
Python
| 0.000017 |
@@ -1750,16 +1750,20 @@
st
+atus
= os.ls
@@ -1813,16 +1813,20 @@
tattr(st
+atus
, key))
|
7597497017053356cdfbebc38aa1468240df2e45
|
fix the install to ./install requirements
|
fabfile/build.py
|
fabfile/build.py
|
from fabric.api import task, local, execute
import clean
__all__ = ['req', 'sdist', 'install', 'sphinx']
@task
def req():
"""install the requirements"""
local("pip install -r requirements.txt")
@task
def sdist():
"""create the sdist"""
execute(clean.all)
local("python setup.py sdist --format=bztar,zip")
@task
def install():
"""install cloudmesh"""
local("pip install -r requirements.txt")
local("python setup.py install")
@task
def sphinx():
local("rm -rf /tmp/sphinx-contrib")
local("cd /tmp; hg clone http://bitbucket.org/birkenfeld/sphinx-contrib/")
local("cd /tmp/sphinx-contrib/autorun/; python setup.py install")
|
Python
| 0.000005 |
@@ -66,15 +66,8 @@
= %5B
-'req',
'sdi
@@ -97,106 +97,8 @@
'%5D%0A%0A
-@task%0Adef req():%0A %22%22%22install the requirements%22%22%22%0A local(%22pip install -r requirements.txt%22)%0A%0A
@tas
@@ -281,20 +281,18 @@
al(%22
-pip
+./
install
-r r
@@ -291,11 +291,8 @@
all
--r
requ
|
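Decoded, the commit removes the standalone `req` task (and its `__all__` entry) and points the install task at a local `./install` script instead of pip:

```python
__all__ = ['sdist', 'install', 'sphinx']

@task
def install():
    """install cloudmesh"""
    local("./install requirements.txt")
    local("python setup.py install")
```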
9646fb2b7f7f441c6630e04fa1e1af358f9c7d10
|
Set version to 0.20 final
|
eulexistdb/__init__.py
|
eulexistdb/__init__.py
|
# file eulexistdb/__init__.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Interact with `eXist-db`_ XML databases.
This package provides classes to ease interaction with eXist XML databases.
It contains the following modules:
* :mod:`eulexistdb.db` -- Connect to the database and query
* :mod:`eulexistdb.query` -- Query :class:`~eulxml.xmlmap.XmlObject`
models from eXist with semantics like a Django_ QuerySet
.. _eXist-db: http://exist.sourceforge.net/
.. _Django: http://www.djangoproject.com/
"""
__version_info__ = (0, 20, 0, 'dev')
# Dot-connect all but the last. Last is dash-connected if not None.
__version__ = '.'.join([str(i) for i in __version_info__[:-1]])
if __version_info__[-1] is not None:
__version__ += ('-%s' % (__version_info__[-1],))
|
Python
| 0.000037 |
@@ -1116,13 +1116,12 @@
0,
-'dev'
+None
)%0A%0A#
|
859e8c3ad30fa4b4899609fc136fa8f45f73b8d4
|
Add zoom functionality for the third axis of data cube
|
data-cube/volume_modify.py
|
data-cube/volume_modify.py
|
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2015, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
# vispy: gallery 2
"""
Example volume rendering
Controls:
* 1 - toggle camera between first person (fly), regular 3D (turntable) and
arcball
* 2 - toggle between volume rendering methods
* 3 - makes the data cube visible or not
* 4 - toggle between colormaps, depends on which rendering method is available
* 0 - reset cameras
* [] - decrease/increase isosurface threshold
With fly camera:
* WASD or arrow keys - move around
* SPACE - brake
* FC - move up-down
* IJKL or mouse - look around
"""
from itertools import cycle
import numpy as np
from vispy import app, scene, io
from vispy.color import get_colormaps, BaseColormap
import pyfits
# Read volume
fitsdata = pyfits.open('/Users/penny/PycharmProjects/untitled/l1448_13co.fits')
# fitsdata = pyfits.open('/Users/penny/Documents/CO/G25.4CO.fits')
#filename = input('Please input fits file name: ')
#if filename is not None:
# fitsdata = pyfits.open(filename)
#else:
# quit
naxis = fitsdata[0].header['NAXIS']
image = fitsdata[0].data
print naxis
if naxis < 3:
print 'The data should not be less than 3 dimensions !'
quit()
elif naxis > 3:
image = fitsdata[0].data[0,:,:,:]
print image.shape
# Set all nan to zero for display
vol1 = np.nan_to_num(np.array(image))
# Prepare canvas
canvas = scene.SceneCanvas(keys='interactive', size=(800, 600), show=True)
canvas.measure_fps()
# Set up a viewbox to display the image with interactive pan/zoom
view = canvas.central_widget.add_view()
# Set whether we are emulating a 3D texture
emulate_texture = False
# Create the volume visuals, only one is visible
volume1 = scene.visuals.Volume(vol1, parent=view.scene, threshold=0.1,
emulate_texture=emulate_texture)
# volume1.transform = scene.STTransform(translate=(64, 64, 0))
# Create two cameras (1 for firstperson, 3 for 3d person)
fov = 60.
cam1 = scene.cameras.FlyCamera(parent=view.scene, fov=fov, name='Fly')
cam2 = scene.cameras.TurntableCamera(parent=view.scene, fov=fov,
name='Turntable')
cam3 = scene.cameras.ArcballCamera(parent=view.scene, fov=fov, name='Arcball')
view.camera = cam2 # Select turntable at first
fitsdata.close()
# create colormaps that work well for translucent and additive volume rendering
class TransFire(BaseColormap):
glsl_map = """
vec4 translucent_fire(float t) {
return vec4(pow(t, 0.5), t, t*t, max(0, t*1.05 - 0.05));
}
"""
class TransGrays(BaseColormap):
glsl_map = """
vec4 translucent_grays(float t) {
return vec4(t, t, t, t*0.05);
}
"""
# Setup colormap iterators
opaque_cmaps = cycle(get_colormaps())
translucent_cmaps = cycle([TransFire(), TransGrays()])
opaque_cmap = next(opaque_cmaps)
translucent_cmap = next(translucent_cmaps)
result = 1
# Implement key presses
@canvas.events.key_press.connect
def on_key_press(event):
# result =1 # invoke every press...
global opaque_cmap, translucent_cmap, result
if event.text == '1':
cam_toggle = {cam1: cam2, cam2: cam3, cam3: cam1}
view.camera = cam_toggle.get(view.camera, cam2)
print(view.camera.name + ' camera')
elif event.text == '2':
methods = ['mip', 'translucent', 'iso', 'additive']
method = methods[(methods.index(volume1.method) + 1) % 4]
print("Volume render method: %s" % method)
cmap = opaque_cmap if method in ['mip', 'iso'] else translucent_cmap
volume1.method = method
volume1.cmap = cmap
elif event.text == '3':
volume1.visible = not volume1.visible
elif event.text == '4':
if volume1.method in ['mip', 'iso']:
cmap = opaque_cmap = next(opaque_cmaps)
else:
cmap = translucent_cmap = next(translucent_cmaps)
volume1.cmap = cmap
elif event.text == '0':
cam1.set_range()
cam3.set_range()
elif event.text != '' and event.text in '[]':
s = -0.025 if event.text == '[' else 0.025
volume1.threshold += s
th = volume1.threshold  # only one volume exists; the old else-branch referenced an undefined volume2
print("Isosurface threshold: %0.3f" % th)
#Add zoom out functionality for the third dimension
elif event.text != '' and event.text in '=-':
z = -1 if event.text == '-' else +1
result += z
if result > 0:
volume1.transform = scene.STTransform(scale=(1, 1, result))
else:
result = 1
print("Volume scale: %d" % result)
# for testing performance
#@canvas.connect
#def on_draw(ev):
#canvas.update()
if __name__ == '__main__':
print(__doc__)
app.run()
|
Python
| 0.000001 |
@@ -705,16 +705,69 @@
hreshold
+%0A* =- - increase/decrease the scale of the third axis
%0A%0AWith f
@@ -4503,16 +4503,17 @@
%25 th)%0A#
+
Add zoom
|
14cb7c49d5b2e155e92c07ecd1e58dc386b0ddb3
|
stop failing on exceptions--skip instead
|
parsing/parsinglib/jobcontainer.py
|
parsing/parsinglib/jobcontainer.py
|
from ..models import Job
import datetime
class JobContainer():
def __init__(self):
self.organization = None
self.title = None
self.division = None
self.date_posted = None
self.date_closing = None
self.date_collected = None
self.url_detail = None
self.salary_waged = None
self.salary_amount = None
self.region = None
def is_unique(self):
""" Checks whether job (denoted by URL) already exists in DB.
Remember to use this function before doing any intense parsing operations.
"""
if not self.url_detail:
raise KeyError("Queried record uniqueness before detail URL set: {}".format(self))
else:
if len(Job.objects.filter(url_detail=self.url_detail)) == 0:
return True
else:
# print("Job already exists in DB: {}".format(self.url_detail))
return False
def cleanup(self):
self.title = self.title.title() if self.title.isupper() else self.title
self.salary_amount = 0 if self.salary_amount == None else self.salary_amount
self.salary_waged = True if self.salary_amount < 5000 else False # totally arbitray amount
self.date_collected = datetime.date.today()
def validate(self):
field_dict = self.__dict__
attributes = {k:v for k, v in field_dict.items() if not k.startswith("_")}
for k, v in attributes.items():
if v == None:
raise KeyError("Job {} was missing {}".format(self.url_detail, k))
def save(self):
""" Save job to DB, after final checks.
"""
if not self.is_unique(): # failsafe in case we forgot to check this earlier.
print("{} tried to save a job that is not unique!".format(self.organization))
return
self.cleanup()
self.validate()
print("Saved job to DB: {}".format(self))
j = Job(organization=self.organization
, title=self.title
, division=self.division
, date_posted=self.date_posted
, date_closing=self.date_closing
, url_detail=self.url_detail
, salary_waged=self.salary_waged
, salary_amount=self.salary_amount
, region=self.region
, date_collected = self.date_collected
)
j.save()
def __str__(self):
return "{} at {}".format(self.title, self.organization)
|
Python
| 0 |
@@ -35,16 +35,17 @@
tetime%0A%0A
+%0A
class Jo
@@ -54,24 +54,25 @@
ontainer():%0A
+%0A
def __in
@@ -646,16 +646,33 @@
eyError(
+%0A
%22Queried
@@ -1154,32 +1154,66 @@
f.salary_amount%0A
+ # totally arbitray amount%0A
self.sal
@@ -1272,34 +1272,8 @@
alse
- # totally arbitray amount
%0A
@@ -1407,10 +1407,24 @@
= %7B
+%0A
k:
+
v fo
@@ -1575,16 +1575,37 @@
eyError(
+%0A
%22Job %7B%7D
@@ -1761,16 +1761,17 @@
nique():
+
# fails
@@ -1833,16 +1833,33 @@
print(
+%0A
%22%7B%7D trie
@@ -1874,17 +1874,16 @@
e a job
-t
hat is n
@@ -1963,16 +1963,33 @@
eanup()%0A
+ try:%0A
@@ -2004,16 +2004,132 @@
idate()%0A
+ except KeyError as err:%0A print(%22%7C%7C EXCEPTION%22)%0A print(%22%7C%7C %22, err)%0A return%0A%0A
@@ -2216,33 +2216,16 @@
nization
-%0A
, title=
@@ -2234,33 +2234,16 @@
lf.title
-%0A
, divisi
@@ -2258,33 +2258,16 @@
division
-%0A
, date_p
@@ -2288,33 +2288,16 @@
e_posted
-%0A
, date_c
@@ -2320,33 +2320,16 @@
_closing
-%0A
, url_de
@@ -2348,33 +2348,16 @@
l_detail
-%0A
, salary
@@ -2380,33 +2380,16 @@
ry_waged
-%0A
, salary
@@ -2414,33 +2414,16 @@
y_amount
-%0A
, region
@@ -2434,33 +2434,16 @@
f.region
-%0A
, date_c
@@ -2450,19 +2450,17 @@
ollected
- =
+=
self.dat
|
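Most of the hunks above are whitespace and line-wrapping churn; the substantive change wraps validation in a try/except so a job with missing fields is skipped rather than crashing the run. Decoded, the core of `save` becomes roughly:

```python
self.cleanup()
try:
    self.validate()
except KeyError as err:
    print("|| EXCEPTION")
    print("||  ", err)
    return  # skip this job instead of failing the whole scrape
```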
d7628bc32a0ba75bb5070ae7774e92039be735df
|
Fix manage_users
|
aetherguild/manage_users.py
|
aetherguild/manage_users.py
|
# -*- coding: utf-8 -*-
import argparse
from getpass import getpass
from listener_service.tables import User
import config
import tabulate
from sqlalchemy.orm import sessionmaker
from sqlalchemy import create_engine
from passlib.hash import pbkdf2_sha512
def check_args(a):
if not a.username:
print("Username parameter is required for this operation.")
return 1
if a.username:
if len(a.username) > 32 or len(a.username) < 4:
print("Username must be between 4 and 32 characters long")
return 1
if a.password:
if len(a.password) < 8:
print("Password must at least 8 characters long")
return 1
if a.nick:
if len(a.nick) > 32 or len(a.nick) < 2:
print("Nickname must be between 4 and 32 characters long")
return 1
else:
a.nick = a.username
return 0
def add_user(a):
# Require password for new users. If one is not given via commandline, get it here.
if not a.password or a.password == '':
a.password = getpass("Password: ")
# Check inputs
ret_val = check_args(a)
if ret_val != 0:
return ret_val
s = db_session()
user = User()
user.username = a.username
user.nickname = a.nick
user.level = userlevels_choices.index(a.level)
user.active = a.active
user.password = pbkdf2_sha512.encrypt(a.password)
s.add(user)
try:
s.commit()
except User.IntegrityError as e:
print("Error: {}".format(e.message))
return 1
finally:
s.close()
print("User {} succesfully added!".format(a.username))
return 0
def del_user(a):
ret_val = check_args(a)
if ret_val != 0:
return ret_val
s = db_session()
User.delete(s, username=a.username)
s.commit()
s.close()
print("User {} deleted".format(a.username))
return 0
def edit_user(a):
# Check if user wants to give password but not via commandline
if a.password == '':
a.password = getpass("Password: ")
ret_val = check_args(a)
if ret_val != 0:
return ret_val
s = db_session()
try:
user = User.get_one(s, username=a.username)
if a.nick:
user.nickname = a.nick
if a.active:
user.active = a.active
if a.level:
user.level = userlevels_choices.index(a.level)
if a.password:
user.password = pbkdf2_sha512.encrypt(a.password)
s.add(user)
s.commit()
except User.NoResultFound:
print("User {} not found.".format(a.username))
return 1
finally:
s.close()
print("User {} edited".format(a.username))
return 0
def list_users(a):
s = db_session()
userlist = []
for user in User.get_many(s):
ser = user.serialize()
ser['level'] = userlevels_choices[ser['level']]
ser['active'] = 'Yes' if ser['active'] else 'No'
userlist.append(ser)
s.close()
headers = {
'id': 'ID',
'username': 'Username',
'nickname': 'Nickname',
'level': 'Level',
'active': 'Active',
'created_at': 'Created At',
'last_contact': 'Last Contact At'
}
print(tabulate.tabulate(userlist, headers, tablefmt="grid"))
return 0
if __name__ == '__main__':
userlevels_choices = ['guest', 'user', 'admin']
ops_choices = ['add', 'delete', 'edit', 'list']
# Form the argument parser (first argument is positional and required)
parser = argparse.ArgumentParser(description='Manage users for the website')
parser.add_argument('operation', nargs='+', choices=ops_choices, help='Operation')
parser.add_argument('--username', type=str, help='Username')
parser.add_argument('--password', type=str, nargs='?', help='Password', default='')
parser.add_argument('--nick', type=str, help='User nickname')
parser.add_argument('--active', type=bool, help='Is the user active', default=True)
parser.add_argument('--level', type=str, choices=userlevels_choices, help='User privilege level', default='user')
args = parser.parse_args()
# Initialize a database session
db_session = sessionmaker()
engine = create_engine(config.DATABASE_CONFIG, pool_recycle=3600)
db_session.configure(bind=engine)
# Find the correct operation function and call it with arguments as a parameter
op = {
'add': add_user,
'delete': del_user,
'edit': edit_user,
'list': list_users
}[args.operation[0]]
exit(op(args))
|
Python
| 0.000002 |
@@ -1330,35 +1330,8 @@
el)%0A
- user.active = a.active%0A
@@ -1380,16 +1380,16 @@
ssword)%0A
+
%0A s.a
@@ -2250,22 +2250,23 @@
if a.
-active
+deleted
:%0A
@@ -2280,25 +2280,27 @@
ser.
-active = a.active
+deleted = a.deleted
%0A
@@ -2888,22 +2888,23 @@
ser%5B'
-active
+deleted
'%5D = 'Ye
@@ -2914,22 +2914,23 @@
if ser%5B'
-active
+deleted
'%5D else
@@ -3116,24 +3116,26 @@
'
-active': 'Active
+deleted': 'Deleted
',%0A
@@ -3825,16 +3825,16 @@
ult='')%0A
+
pars
@@ -3895,96 +3895,8 @@
e')%0A
- parser.add_argument('--active', type=bool, help='Is the user active', default=True)%0A
|
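Decoded, the fix replaces the `active` flag with `deleted` throughout (`edit_user`, `list_users`, the table headers), stops `add_user` from setting it, and drops the `--active` argparse argument. The `edit_user` branch, for example, becomes:

```python
if a.deleted:
    user.deleted = a.deleted  # was: user.active = a.active
```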
f633df6bb8e0e84699db2f47178f4b402ccc07a8
|
Fix `OverflowError`.
|
eventkit/utils/time.py
|
eventkit/utils/time.py
|
from datetime import timedelta
from timezone import timezone
ROUND_DOWN = 'ROUND_DOWN'
ROUND_NEAREST = 'ROUND_NEAREST'
ROUND_UP = 'ROUND_UP'
WEEKDAYS = {
'MON': 0,
'TUE': 1,
'WED': 2,
'THU': 3,
'FRI': 4,
'SAT': 5,
'SUN': 6,
}
MON = 'MON'
TUE = 'TUE'
WED = 'WED'
THU = 'THU'
FRI = 'FRI'
SAT = 'SAT'
SUN = 'SUN'
def round_datetime(when=None, precision=60, rounding=ROUND_NEAREST):
"""
Round a datetime object to a time that matches the given precision.
when (datetime), default now
The datetime object to be rounded.
precision (int, timedelta, str), default 60
The number of seconds, weekday (MON, TUE, WED, etc.) or timedelta
object to which the datetime object should be rounded.
rounding (str), default ROUND_NEAREST
The rounding method to use (ROUND_DOWN, ROUND_NEAREST, ROUND_UP).
"""
when = when or timezone.now()
weekday = WEEKDAYS.get(precision, WEEKDAYS['MON'])
if precision in WEEKDAYS:
precision = int(timedelta(days=7).total_seconds())
elif isinstance(precision, timedelta):
precision = int(precision.total_seconds())
# Get delta between the beginning of time and the given datetime object.
# If precision is a weekday, the beginning of time must be that same day.
when_min = when.min + timedelta(days=weekday)
if timezone.is_aware(when):
when_min = \
timezone.datetime(tzinfo=when.tzinfo, *when_min.timetuple()[:3])
delta = when - when_min
remainder = int(delta.total_seconds()) % precision
# First round down and strip microseconds.
when -= timedelta(seconds=remainder, microseconds=when.microsecond)
# Then add precision to round up.
if rounding == ROUND_UP or (
rounding == ROUND_NEAREST and remainder >= precision / 2):
when += timedelta(seconds=precision)
return when
|
Python
| 0 |
@@ -9,24 +9,34 @@
etime import
+ datetime,
timedelta%0A%0A
@@ -1434,42 +1434,178 @@
-when_min = %5C%0A timezone.
+# It doesn't seem to be possible to localise the %60min%60 datetime without%0A # raising %60OverflowError%60, so create a timezone aware object manually.%0A when_min =
date
|
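Decoded, the fix imports `datetime` directly and constructs the aware minimum by hand; the hunk's own comment explains that localising the `min` datetime raises `OverflowError`:

```python
from datetime import datetime, timedelta

# ...inside round_datetime():
if timezone.is_aware(when):
    # It doesn't seem to be possible to localise the `min` datetime without
    # raising `OverflowError`, so create a timezone aware object manually.
    when_min = datetime(tzinfo=when.tzinfo, *when_min.timetuple()[:3])
```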
701fc978271f8e0fc203374e05a40f23132b78b9
|
Add convenience methods for active version number and Service Details API
|
fastly/models.py
|
fastly/models.py
|
"""
"""
from string import Template
from copy import copy
from six.moves.urllib.parse import urlencode
class Model(object):
def __init__(self):
self._original_attrs = None
self.attrs = {}
@classmethod
def query(cls, conn, pattern, method, suffix='', body=None, **kwargs):
url = Template(pattern).substitute(**kwargs)
url += suffix
headers = { 'Content-Accept': 'application/json' }
if method == 'POST' or method == 'PUT':
headers['Content-Type'] = 'application/x-www-form-urlencoded'
return conn.request(method, url, body, headers)
def _query(self, method, suffix='', body=None):
return self.__class__.query(self.conn, self.INSTANCE_PATTERN, method, suffix, body, **self.attrs)
def _collection_query(self, method, suffix='', body=None):
return self.__class__.query(self.conn, self.COLLECTION_PATTERN, method, suffix, body, **self.attrs)
def save(self):
if self._original_attrs == self.attrs:
return False
if self._original_attrs:
out = {}
for k in self.attrs:
if self.attrs[k] != self._original_attrs[k]:
out[k] = self.attrs[k]
params_str = urlencode(out)
resp, data = self._query('PUT', body=params_str)
else:
params_str = urlencode(self.attrs)
resp, data = self._collection_query('POST', body=params_str)
self._original_attrs = data
self.attrs = data
return True
def delete(self):
resp, data = self._query('DELETE')
return data
@classmethod
def list(cls, conn, **kwargs):
resp, data = cls.query(conn, cls.COLLECTION_PATTERN, 'GET', **kwargs)
collection = []
if resp.status == 200 and hasattr(data, 'sort'):
for i in range(0, len(data)):
obj = cls.construct_instance(data[i])
obj.conn = conn
collection.append(obj)
return collection
@classmethod
def find(cls, conn, **kwargs):
resp, data = cls.query(conn, cls.INSTANCE_PATTERN, 'GET', **kwargs)
obj = cls.construct_instance(data)
obj.conn = conn
return obj
@classmethod
def create(cls, conn, data):
instance = cls.construct_instance(data, new=True)
instance.conn = conn
instance.save()
return instance
@classmethod
def construct_instance(cls, data, new=False):
obj = cls()
if not new:
obj._original_attrs = data
obj.attrs = copy(data)
return obj
class Service(Model):
COLLECTION_PATTERN = '/service'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$id'
def purge_key(self, key):
self._query('POST', '/purge/%s' % key)
def purge_all(self):
self._query('POST', '/purge_all')
def version(self):
""" Create a new version under this service. """
return Version.create(self.conn, {
# Parent params
'service_id': self.attrs['id'],
})
class Version(Model):
COLLECTION_PATTERN = Service.COLLECTION_PATTERN + '/$service_id/version'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$number'
def check_backends(self):
resp, data = self._query('GET', '/backend/check_all')
return data
def activate(self):
resp, data = self._query('PUT', '/activate')
return data
def deactivate(self):
resp, data = self._query('PUT', '/deactivate')
return data
def clone(self):
resp, data = self._query('PUT', '/clone')
return data
def validate(self):
resp, data = self._query('GET', '/validate')
return data
def lock(self):
resp, data = self._query('PUT', '/lock')
return data
def boilerplate(self):
resp, data = self._query('GET', '/boilerplate')
return data
def generated_vcl(self):
resp, data = self._query('GET', '/generated_vcl')
return data
def vcl(self, name, content):
""" Create a new VCL under this version. """
return VCL.create(self.conn, {
# Parent params
'service_id': self.attrs['service_id'],
'version': self.attrs['number'],
# New instance params
'name': name,
'content': content,
})
class Domain(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/domain'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
def check_cname(self):
resp, data = self._query('GET', '/check')
return (data[1], data[2])
class Backend(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/backend'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
class Director(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/director'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
class Origin(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/origin'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
class Healthcheck(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/healthcheck'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
class Syslog(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/syslog'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
class User(Model):
COLLECTION_PATTERN = '/user/$id'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$id'
class Settings(Model):
INSTANCE_PATTERN = Version.COLLECTION_PATTERN + '/$version/settings'
COLLECTION_PATTERN = INSTANCE_PATTERN
class Condition(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/condition'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
class Header(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/header'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
class VCL(Model):
COLLECTION_PATTERN = Version.COLLECTION_PATTERN + '/$version/vcl'
INSTANCE_PATTERN = COLLECTION_PATTERN + '/$name'
def download(self):
resp, data = self._query('GET', '/download')
return data
def main(self):
resp, data = self._query('PUT', '/main')
return data
|
Python
| 0 |
@@ -2744,24 +2744,335 @@
N + '/$id'%0A%0A
+ def details(self):%0A resp, data = self._query('GET', '/details')%0A return data%0A%0A def get_active_version_number(self):%0A versions = self.attrs.get('versions')%0A if versions:%0A return list(filter(lambda x: x%5B'active'%5D is True, versions))%5B0%5D%5B'number'%5D%0A return None%0A%0A
def purg
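Decoded, the hunk inserts the two convenience methods into Service between INSTANCE_PATTERN and purge_key; comments here are added for orientation and are not part of the diff:

class Service(Model):
    COLLECTION_PATTERN = '/service'
    INSTANCE_PATTERN = COLLECTION_PATTERN + '/$id'

    def details(self):
        # Wrapper for the Service Details API endpoint.
        resp, data = self._query('GET', '/details')
        return data

    def get_active_version_number(self):
        # Return the number of the version flagged active, if any.
        versions = self.attrs.get('versions')
        if versions:
            return list(filter(lambda x: x['active'] is True, versions))[0]['number']
        return None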
|
09a6c3b5d860f8bbfafec9f5cdb4cef00cdae9c9
|
Implement an additional test for handling exceptions in bake
|
tests/test_cookies.py
|
tests/test_cookies.py
|
# -*- coding: utf-8 -*-
import json
def test_help_message(testdir):
result = testdir.runpytest(
'--help',
)
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'cookies:',
'*--template=TEMPLATE*',
])
def test_cookies_fixture(testdir):
"""Make sure that pytest accepts the `cookies` fixture."""
# create a temporary pytest test module
testdir.makepyfile("""
def test_valid_fixture(cookies):
assert hasattr(cookies, 'bake')
assert callable(cookies.bake)
""")
# run pytest with the following cmd args
result = testdir.runpytest('-v')
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_valid_fixture PASSED',
])
# make sure that that we get a '0' exit code for the testsuite
assert result.ret == 0
def test_cookies_bake(testdir):
"""Programmatically create a **Cookiecutter** template and use `bake` to
create a project from it.
"""
template = testdir.tmpdir.ensure('cookiecutter-template', dir=True)
template_config = {
'repo_name': 'foobar',
'short_description': 'Test Project'
}
template.join('cookiecutter.json').write(json.dumps(template_config))
template_readme = '\n'.join([
'{{cookiecutter.repo_name}}',
'{% for _ in cookiecutter.repo_name %}={% endfor %}',
'{{cookiecutter.short_description}}',
])
repo = template.ensure('{{cookiecutter.repo_name}}', dir=True)
repo.join('README.rst').write(template_readme)
testdir.makepyfile("""
def test_bake_project(cookies):
result = cookies.bake(extra_context={'repo_name': 'helloworld'})
assert result.exit_code == 0
assert result.exception is None
assert result.project.basename == 'helloworld'
assert result.project.isdir()
def test_bake_should_create_new_output(cookies):
first_result = cookies.bake()
assert first_result.project.dirname.endswith('bake00')
second_result = cookies.bake()
assert second_result.project.dirname.endswith('bake01')
""")
# run pytest with the following cmd args
result = testdir.runpytest('-v', '--template={}'.format(template))
# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines([
'*::test_bake_project PASSED',
])
|
Python
| 0.000001 |
@@ -2440,24 +2440,1127 @@
project PASSED',%0A %5D)%0A
+%0A%0Adef test_cookies_bake_should_handle_exception(testdir):%0A %22%22%22Programmatically create a **Cookiecutter** template and make sure that%0A cookies.bake() handles exceptions that happen during project generation.%0A%0A We expect **Cookiecutter** to raise a %60NonTemplatedInputDirException%60.%0A %22%22%22%0A template = testdir.tmpdir.ensure('cookiecutter-fail', dir=True)%0A%0A template_config = %7B%0A 'repo_name': 'foobar',%0A 'short_description': 'Test Project'%0A %7D%0A template.join('cookiecutter.json').write(json.dumps(template_config))%0A%0A template.ensure('cookiecutter.repo_name', dir=True)%0A%0A testdir.makepyfile(%22%22%22%0A def test_bake_should_fail(cookies):%0A result = cookies.bake()%0A%0A assert result.exit_code == -1%0A assert result.exception is not None%0A assert result.project is None%0A %22%22%22)%0A%0A # run pytest with the following cmd args%0A result = testdir.runpytest('-v', '--template=%7B%7D'.format(template))%0A%0A # fnmatch_lines does an assertion internally%0A result.stdout.fnmatch_lines(%5B%0A '*::test_bake_should_fail PASSED',%0A %5D)%0A
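The single hunk appends one new test; decoded from the %0A/%22/%7B escapes it reads:

def test_cookies_bake_should_handle_exception(testdir):
    """Programmatically create a **Cookiecutter** template and make sure that
    cookies.bake() handles exceptions that happen during project generation.

    We expect **Cookiecutter** to raise a `NonTemplatedInputDirException`.
    """
    template = testdir.tmpdir.ensure('cookiecutter-fail', dir=True)

    template_config = {
        'repo_name': 'foobar',
        'short_description': 'Test Project'
    }
    template.join('cookiecutter.json').write(json.dumps(template_config))

    template.ensure('cookiecutter.repo_name', dir=True)

    testdir.makepyfile("""
        def test_bake_should_fail(cookies):
            result = cookies.bake()

            assert result.exit_code == -1
            assert result.exception is not None
            assert result.project is None
    """)

    # run pytest with the following cmd args
    result = testdir.runpytest('-v', '--template={}'.format(template))

    # fnmatch_lines does an assertion internally
    result.stdout.fnmatch_lines([
        '*::test_bake_should_fail PASSED',
    ])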
|
3b4de1be81c7951ca064ff46e1f3e1ed95436ae3
|
fix XSS vulnerability
|
django_bootstrap_breadcrumbs/templatetags/django_bootstrap_breadcrumbs.py
|
django_bootstrap_breadcrumbs/templatetags/django_bootstrap_breadcrumbs.py
|
# -*- coding: utf-8 -*-
"""
:copyright: Copyright 2013 by Łukasz Mierzwa
:contact: [email protected]
"""
from inspect import ismethod
from django.core.urlresolvers import reverse, NoReverseMatch
from django.utils.safestring import mark_safe
from django.utils.translation import ugettext as _
from django.db.models import Model
from django import template
register = template.Library()
CONTEXT_KEY = 'DJANGO_BREADCRUMB_LINKS'
def breadcrumb(context, label, viewname, *args):
"""
Add link to list of breadcrumbs, usage:
{% load bubbles_breadcrumbs %}
{% breadcrumb "Home" "index" %}
Remember to use it inside {% block %} with {{ block.super }} to get all
parent breadcrumbs.
:param label: Breadcrumb link label.
:param viewname: Name of the view to link this breadcrumb to, or Model
instance with implemented get_absolute_url().
:param args: Any arguments to view function.
"""
context['request'].META[CONTEXT_KEY] = context['request'].META.get(
CONTEXT_KEY, []) + [(label, viewname, args)]
return ''
def render_breadcrumbs(context):
"""
Render breadcrumbs html using twitter bootstrap css classes.
"""
links = []
for (label, viewname, args) in context['request'].META.get(
CONTEXT_KEY, []):
if isinstance(viewname, Model) and hasattr(
viewname, 'get_absolute_url') and ismethod(
viewname.get_absolute_url):
url = viewname.get_absolute_url()
else:
try:
url = reverse(viewname=viewname, args=args)
except NoReverseMatch:
url = viewname
links.append((url, _(unicode(label)) if label else label))
if not links:
return ''
ret = '<ul class="breadcrumb">'
total = len(links)
for (i, (url, label)) in enumerate(links, 1):
ret += '<li>'
if total > 1 and i < total:
ret += '<a href="%s">%s</a>' % (url, label)
ret += ' <span class="divider">/</span>'
else:
ret += label
ret += '</li>'
ret += '</ul>'
return mark_safe(ret)
register.simple_tag(takes_context=True)(breadcrumb)
register.simple_tag(takes_context=True)(render_breadcrumbs)
|
Python
| 0 |
@@ -201,16 +201,53 @@
seMatch%0A
+from django.utils.html import escape%0A
from dja
@@ -1090,21 +1090,29 @@
%5B%5D) + %5B(
+escape(
label
+)
, viewna
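The fix is two hunks: import django.utils.html.escape and escape the caller-supplied label before it is stored, since render_breadcrumbs later concatenates labels into HTML and marks the result safe. Decoded:

from django.utils.html import escape

    context['request'].META[CONTEXT_KEY] = context['request'].META.get(
        CONTEXT_KEY, []) + [(escape(label), viewname, args)]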
|
41ea0dd8c48ef8a336422482e9bbd1911bb7e168
|
Make it work in 90% of the cases. 3:30.
|
Commitment.py
|
Commitment.py
|
import sublime
import sublime_plugin
from commit import Commitment
whatthecommit = 'http://whatthecommit.com/'
randomMessages = Commitment()
class CommitmentToClipboardCommand(sublime_plugin.WindowCommand):
def run(self):
commit = randomMessages.get()
message = commit.get('message', '')
message_hash = commit.get('message_hash', '')
if message:
print 'Commitment: ' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
sublime.set_clipboard(message)
class CommitmentToStatusBarCommand(sublime_plugin.WindowCommand):
def run(self):
commit = randomMessages.get()
message = commit.get('message', '')
message_hash = commit.get('message_hash', '')
if message:
print 'Commitment: ' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash
sublime.status_message(message)
|
Python
| 0.999808 |
@@ -29,16 +29,34 @@
e_plugin
+%0Aimport HTMLParser
%0A%0Afrom c
@@ -294,32 +294,106 @@
e =
-commit.get('message', ''
+HTMLParser.HTMLParser().unescape(commit.get('message', '').replace('%5Cn','').replace('%3Cbr/%3E', '%5Cn')
)%0A
@@ -479,32 +479,39 @@
'Commitment: ' +
+ '%5Cn' +
message + '%5Cn'
@@ -737,32 +737,106 @@
e =
-commit.get('message', ''
+HTMLParser.HTMLParser().unescape(commit.get('message', '').replace('%5Cn','').replace('%3Cbr/%3E', '%5Cn')
)%0A
@@ -930,16 +930,23 @@
ent: ' +
+ '%5Cn' +
message
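Decoded (%5C, %3C, %3E are backslash and angle brackets), both command classes now strip raw newlines, turn <br/> into real line breaks, and unescape HTML entities before using the message; a sketch of the patched lines, Python 2 like the rest of the file:

import HTMLParser

        message = HTMLParser.HTMLParser().unescape(
            commit.get('message', '').replace('\n', '').replace('<br/>', '\n'))
        if message:
            print 'Commitment: ' + '\n' + message + '\n' + 'Permalink: ' + whatthecommit + message_hash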
|
6b85753357bbadd7ae0c0b1b814d7b10065e536b
|
clean up a little
|
tests/test_crawler.py
|
tests/test_crawler.py
|
# -*- coding: utf-8 -*-
import copy
import pytest
from urlparse import urljoin
from bs4.element import Tag
from crawly import crawler
from crawly.href import Href
def test_live_site_without_exception(site):
"""Crawls a live site without raising an exception"""
crawler.crawl(Href(urljoin(site, '/index.html')))
@pytest.mark.parametrize('href,origin,visited', [
# Absolute, same origin, not visited before
('http://example.com/resource.html', 'http://example.com', None),
('https://example.com/resource.html', 'http://example.com', None),
# Absolute (double slash), same origin, not visited before
('//www.example.com/resource.html', 'http://www.example.com', None),
# Different subdomains
('http://foo.example.com/resource.html', 'http://www.example.com', None),
('http://foo.bar.example.com/resource.html', 'http://www.example.com', None),
('http://example.com/resource.html', 'http://www.example.com', None),
# Query string / fragment
('http://example.com/resource.html?foo=bar', 'http://example.com', 'http://example.com/resource.html'),
('http://example.com/resource.html#blah', 'http://example.com', 'http://example.com/resource.html'),
# Relative, not visited before
('resource.html', 'http://example.com', None),
('/resource.html', 'http://example.com', None),
# Absolute, same origin, visited before but _different_ protocol
('http://example.com/resource.html', 'https://example.com', 'https://example.com/resource.html'),
('https://example.com/resource.html', 'https://example.com', 'http://example.com/resource.html'),
# Relative, visited before but _different_ protocol
('/resource.html', 'http://example.com', 'https://example.com/resource.html'),
('//example.com/resource.html', 'http://example.com', 'https://example.com/resource.html'),
# Unicode
(u'/世界', 'http://example.com', None),
# Scary looking but OK
('awofawfi m29)(!F)', 'http://example.com', None),
(u'šðèæž ŠÐÈÆŽ', 'http://example.com', None),
(u'/šðèæž ŠÐÈÆŽ', 'http://example.com', None),
(u'http://example.com/šðèæž ŠÐÈÆŽ', 'http://example.com', None),
])
def test_good_links(href, origin, visited):
"""Adds valid links to queue"""
tag = Tag(name='a', attrs={'href': href})
origin = Href(origin)
queue = [Href(visited)] if visited is not None else []
old = copy.copy(queue)
assert queue == old, 'sanity check failed'
crawler.accept_link(tag, origin, queue, set())
expected = [Href(href).to_absolute(origin)] + old
assert expected == queue
@pytest.mark.parametrize('href,origin,visited', [
# Absolute, different origin, not visited before
('http://example.com/resource.js', 'https://foobar.net', None),
('https://example.com/resource.js', 'https://foobar.net', None),
# Absolute (double slash), different origin, not visited before
('//example.com/resource.js', 'https://foobar.net', None),
('//example.com/resource.js', 'https://foobar.net', None),
# Double slash with no domain
('//resource.html', 'http://example.com', None),
# Same origin, non-HTTP / HTTPS protocol
('ftp://example.com/resource.js', 'https://example.com', None),
('ssh://example.com/resource.js', 'https://example.com', None),
# Different TLDs
('http://www.example.com/resource.js', 'https://example.net', None),
# Query string / fragment
('http://example.com/resource.html?foo=bar', 'http://example.com', 'http://example.com/resource.html?foo=bar'),
('http://example.com/resource.html#blah', 'http://example.com', 'http://example.com/resource.html#blah'),
# Already visited - HTTP
('resource.html', 'http://example.com', 'http://example.com/resource.html'),
('/resource.html', 'http://example.com', 'http://example.com/resource.html'),
('http://example.com/resource.html', 'http://example.com', 'http://example.com/resource.html'),
# Already visited - HTTPS
('resource.html', 'https://example.com', 'https://example.com/resource.html'),
('//resource.html', 'https://example.com', 'https://example.com/resource.html'),
('https://example.com/resource.html', 'https://example.com', 'https://example.com/resource.html'),
# Malformed
('', 'http://example.com', None)
])
def test_bad_links(href, origin, visited):
"""Ignores invalid links"""
tag = Tag(name='a', attrs={'href': href})
origin = Href(origin)
queue = [Href(visited)] if visited is not None else []
expected = copy.copy(queue)
assert queue == expected, 'sanity check failed'
crawler.accept_link(tag, origin, queue, set())
assert expected == queue
|
Python
| 0.000036 |
@@ -76,36 +76,8 @@
oin%0A
-from bs4.element import Tag%0A
from
@@ -99,16 +99,16 @@
crawler%0A
+
from cra
@@ -2216,54 +2216,8 @@
%22%22%22%0A
- tag = Tag(name='a', attrs=%7B'href': href%7D)%0A
@@ -2352,32 +2352,8 @@
old
-, 'sanity check failed'%0A
%0A
@@ -2365,35 +2365,42 @@
ler.accept_link(
-tag
+Href(href)
, origin, queue,
@@ -2411,26 +2411,28 @@
())%0A
-expected =
+assert old +
%5BHref(h
@@ -2460,34 +2460,8 @@
in)%5D
- + old%0A assert expected
==
@@ -4123,62 +4123,8 @@
l'),
-%0A%0A # Malformed%0A ('', 'http://example.com', None)
%0A%5D)%0A
@@ -4202,80 +4202,8 @@
%22%22%22%0A
- tag = Tag(name='a', attrs=%7B'href': href%7D)%0A origin = Href(origin)%0A
@@ -4305,49 +4305,25 @@
ert
-queue == expected, 'sanity check failed'%0A
+expected == queue
%0A
@@ -4347,19 +4347,32 @@
ink(
-tag,
+Href(href), Href(
origin
+)
, qu
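Decoded, the cleanup makes accept_link take a Href instead of a bs4 Tag (dropping the bs4 import), inlines the expected-queue assertion, removes the redundant assertion messages, and deletes the 'Malformed' empty-href case. The reworked call sites, with surrounding context assumed from the file above:

# test_good_links
crawler.accept_link(Href(href), origin, queue, set())
assert old + [Href(href).to_absolute(origin)] == queue

# test_bad_links (origin is no longer pre-wrapped at the top of the test)
crawler.accept_link(Href(href), Href(origin), queue, set())
assert expected == queue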
|
b712346d4e138ccd00599c2d2fb9fdd938964589
|
Remove startup.bat after resume.
|
pausable_unittest/windowspauser.py
|
pausable_unittest/windowspauser.py
|
import pausable_unittest
import os
import os.path
import sys
import subprocess
import tempfile
import ctypes
TASK_NAME = "pausable_unittest"
BASE_DIR = os.path.abspath(os.getcwd())
BAT_PATH = os.path.join(BASE_DIR, "startup.bat")
PYTHON_PATH = os.path.abspath(sys.executable)
SCRIPT_PATH = os.path.relpath(sys.argv[0])
BAT_CONTENT_CMD_OPEN = "cd /d \"%~dp0\"\n" + \
('start "pausable_unittest" cmd /k ""%s" "%s""\n' % (PYTHON_PATH, SCRIPT_PATH))
BAT_CONTENT_CMD_CLOSE = "cd /d \"%~dp0\"\n" + \
('start "pausable_unittest" cmd /c ""%s" "%s""\n' % (PYTHON_PATH, SCRIPT_PATH))
class Pauser(pausable_unittest.BasePauser):
def __init__(self, close_cmd=False):
super(Pauser, self).__init__()
self._close_cmd = close_cmd
def check_call(self, command):
subprocess.check_output(command, stderr=subprocess.STDOUT)
def check_output(self, command):
return subprocess.check_output(command, stderr=subprocess.STDOUT)
def is_admin(self):
return (ctypes.windll.shell32.IsUserAnAdmin() != 0)
def system_reboot(self):
self.check_call([ "shutdown.exe", "/r", "/t", "5" ])
def register_admin_startup(self):
try:
user = os.environ["USERNAME"]
command = [ "schtasks.exe", "/Create", "/RU", user, "/SC", "ONLOGON", "/TN", TASK_NAME, "/TR", BAT_PATH, "/F", "/RL", "HIGHEST" ]
self.check_call(command)
command = [ "schtasks.exe", "/Query", "/TN", TASK_NAME, "/XML", "ONE" ]
xml = self.check_output(command)
xml = xml.replace("<DisallowStartIfOnBatteries>true</DisallowStartIfOnBatteries>",
"<DisallowStartIfOnBatteries>false</DisallowStartIfOnBatteries>")
xml = xml.replace("<StopIfGoingOnBatteries>true</StopIfGoingOnBatteries>",
"<StopIfGoingOnBatteries>false</StopIfGoingOnBatteries>")
with tempfile.NamedTemporaryFile(dir=BASE_DIR, delete=False) as xml_file:
xml_file.write(xml)
xml_file.close()
xml_filename = xml_file.name
try:
command = [ "schtasks.exe", "/Create", "/TN", TASK_NAME, "/F", "/XML", xml_filename ]
self.check_call(command)
finally:
os.remove(xml_filename)
except:
self.unregister_startup()
def nonadmin_startup_filepath(self):
startup_folder = os.path.join(os.environ["APPDATA"], r'Microsoft\Windows\Start Menu\Programs\Startup')
return os.path.join(startup_folder, "pausable_unittest.bat")
def register_nonadmin_startup(self):
path = self.nonadmin_startup_filepath()
try:
with open(path, "w") as f:
f.write('"%s"' % BAT_PATH)
except:
if os.path.exists(path):
os.remove(path)
def register_startup(self):
with open(BAT_PATH, "w") as f:
if self._close_cmd:
f.write(BAT_CONTENT_CMD_CLOSE)
else:
f.write(BAT_CONTENT_CMD_OPEN)
if self.is_admin():
self.register_admin_startup()
else:
self.register_nonadmin_startup()
def unregister_startup(self):
try:
if self.is_admin():
self.check_call([ "schtasks.exe", "/Delete", "/TN", TASK_NAME, "/F" ])
else:
path = self.nonadmin_startup_filepath()
if os.path.exists(path):
os.remove(path)
except:
pass
def add_actions(self):
def reboot(self):
self.pause(("reboot",))
self.add_action("reboot", reboot)
def exec_for_reboot(self, command, expected_exitcode=0):
self.pause(("exec_for_reboot", command, expected_exitcode))
self.add_action("exec_for_reboot", exec_for_reboot)
def do_pause(self, info):
if info[0] == "reboot":
self.register_startup()
self.system_reboot()
elif info[0] == "exec_for_reboot":
cmd = info[1]
expected_exitcode = info[2]
self.register_startup()
ret = subprocess.call(cmd)
if type(expected_exitcode) == list or type(expected_exitcode) == tuple:
if ret in expected_exitcode:
raise subprocess.CalledProcessError(ret, str(cmd))
else:
if ret != expected_exitcode:
raise subprocess.CalledProcessError(ret, str(cmd))
def after_pause(self):
self.unregister_startup()
|
Python
| 0 |
@@ -3629,16 +3629,95 @@
(path)%0D%0A
+ if os.path.exists(BAT_PATH):%0D%0A os.remove(BAT_PATH)%0D%0A
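The %0D%0A escapes are CRLF line endings. Decoded, unregister_startup (which after_pause calls once the test run resumes) now also deletes the generated batch file; indentation reconstructed from the hunk:

    def unregister_startup(self):
        try:
            if self.is_admin():
                self.check_call([ "schtasks.exe", "/Delete", "/TN", TASK_NAME, "/F" ])
            else:
                path = self.nonadmin_startup_filepath()
                if os.path.exists(path):
                    os.remove(path)
            if os.path.exists(BAT_PATH):
                os.remove(BAT_PATH)
        except:
            pass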
|
81c32c9bc0868f7ccd764d8432fd46ccb7e6a8ef
|
Use get instead
|
paystackapi/tests/test_transfer.py
|
paystackapi/tests/test_transfer.py
|
import httpretty
from paystackapi.tests.base_test_case import BaseTestCase
from paystackapi.transfer import Transfer
class TestTransfer(BaseTestCase):
@httpretty.activate
def test_initiate(self):
"""Method defined to test transfer initiation."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/transfer"),
content_type='text/json',
body='{"status": true, "message": "Transfer requires OTP to continue"}',
status=201,
)
response = Transfer.initiate(
source="balance",
reason="Calm down",
amount="3794800",
recipient="RCP_gx2wn530m0i3w3m",
)
self.assertTrue(response['status'])
@httpretty.activate
def test_list(self):
"""Method defined to test transfer list."""
httpretty.register_uri(
httpretty.POST,
self.endpoint_url("/transfer"),
content_type='text/json',
body='{"status": true, "message": "Transfers retrieved"}',
status=201,
)
response = Transfer.list(
perPage=3,
page=1
)
self.assertTrue(response['status'])
|
Python
| 0 |
@@ -898,35 +898,34 @@
httpretty.
-POS
+GE
T,%0A s
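Offset 898 falls inside test_list, so only the second stub changes; listing transfers is presumably a GET endpoint in the Paystack API. Decoded:

        httpretty.register_uri(
            httpretty.GET,
            self.endpoint_url("/transfer"),
            content_type='text/json',
            body='{"status": true, "message": "Transfers retrieved"}',
            status=201,
        )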
|
9a425bae3af8cca7ad8be938d7f698ef65f42f3a
|
Update load_groups_pipeline.py (#210)
|
google/cloud/security/inventory/pipelines/load_groups_pipeline.py
|
google/cloud/security/inventory/pipelines/load_groups_pipeline.py
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Pipeline to load GSuite Groups into Inventory."""
import json
from google.cloud.security.common.gcp_api import errors as api_errors
from google.cloud.security.common.util import log_util
from google.cloud.security.inventory import errors as inventory_errors
from google.cloud.security.inventory.pipelines import base_pipeline
LOGGER = log_util.get_logger(__name__)
class LoadGroupsPipeline(base_pipeline.BasePipeline):
"""Pipeline to load groups data into Inventory."""
RESOURCE_NAME = 'groups'
def __init__(self, cycle_timestamp, configs, admin_client, dao):
"""Constructor for the data pipeline.
Args:
cycle_timestamp: String of timestamp, formatted as YYYYMMDDTHHMMSSZ.
configs: Dictionary of configurations.
admin_client: Admin API client.
dao: Data access object.
Returns:
None
"""
super(LoadGroupsPipeline, self).__init__(
cycle_timestamp, configs, admin_client, dao)
def _transform(self, groups_map):
"""Yield an iterator of loadable groups.
Args:
A list of group objects from the Admin SDK.
Yields:
An iterable of loadable groups as a per-group dictionary.
"""
for group in groups_map:
yield {'group_id': group['id'],
'group_email': group['email'],
'group_kind': group['kind'],
'direct_member_count': group['directMembersCount'],
'raw_group': json.dumps(group)}
def _retrieve(self):
"""Retrieve the groups from GSuite.
Returns:
A list of group list objects from the Admin SDK.
Raises:
LoadDataPipelineException: An error with loading data has occurred.
"""
try:
return self.api_client.get_groups()
except api_errors.ApiExecutionError as e:
raise inventory_errors.LoadDataPipelineError(e)
def run(self):
"""Runs the load GSuite account groups pipeline."""
groups_map = self._retrieve()
if isinstance(groups_map, list):
loadable_groups = self._transform(groups_map)
self._load(self.RESOURCE_NAME, loadable_groups)
self._get_loaded_count()
else:
LOGGER.warn('No groups retrieved.')
|
Python
| 0 |
@@ -1910,14 +1910,18 @@
roup
-%5B
+.get(
'id'
-%5D
+)
,%0A
@@ -1961,17 +1961,21 @@
roup
-%5B
+.get(
'email'
-%5D
+)
,%0A
@@ -2014,16 +2014,20 @@
roup
-%5B
+.get(
'kind'
-%5D
+)
,%0A
@@ -2075,9 +2075,13 @@
roup
-%5B
+.get(
'dir
@@ -2100,9 +2100,9 @@
unt'
-%5D
+)
,%0A
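Decoded, the transform swaps bracket indexing for dict.get, so a group record missing one of the keys yields None for that column instead of raising KeyError and killing the pipeline:

        for group in groups_map:
            yield {'group_id': group.get('id'),
                   'group_email': group.get('email'),
                   'group_kind': group.get('kind'),
                   'direct_member_count': group.get('directMembersCount'),
                   'raw_group': json.dumps(group)}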
|
2695c8c52d292f94cf404c959b43e2c1c57ddc02
|
Implement handling of trade messages.
|
src/udprecieve.py
|
src/udprecieve.py
|
import json
import random
from twisted.internet.protocol import DatagramProtocol
from twisted.internet import reactor
from crypto import get_public_bytestring
from orderbook import (match_incoming_ask, match_incoming_bid,
get_bids, get_asks, get_own_bids, get_own_asks,
trades, offers, get_offer,
trade_offer, create_confirm, create_cancel)
# Printing functions for testing
def offer_to_string(offer):
s = "{\n"
for k, v in offer.iteritems():
if k == 'id':
v = v.split('\n')[1][:20] + '...'
s += "\t{}: {}\n".format(k, v)
s += " }"
return s
def offers_to_string(offers):
return '\n '.join(offer_to_string(offer) for offer in offers)
def print_all_offers():
print '''
Bids
=========
{}
Asks
========
{}
Own bids
========
{}
Own Asks
========
{}
Trades
========
{}
'''.format(*[offers_to_string(o) for o in (get_bids(), get_asks(), get_own_bids(), get_own_asks(), trades)])
class UdpReceive(DatagramProtocol):
def __init__(self, name):
self.name = name
self.history = {}
self.peers = {}
def startProtocol(self):
self.load_peers()
pass
def stopProtocol(self):
pass
def datagramReceived(self, data, (host, port)):
real_data = json.loads(data)
if not real_data['message-id'] in self.history:
handle_data(data)
self.relay_message(data)
self.history[real_data['message-id']] = True
print_all_offers()
else:
print "duplicate message received. ID:%d" % real_data['message-id']
def relay_message(self, message):
gossip_targets = random.sample(self.peers, 2)
for address in gossip_targets:
self.transport.write(message, (address, int(self.peers[address])))
def load_peers(self):
with open("peerlist.txt") as f:
for line in f:
(address, port) = line.split(':')
self.peers[address] = port
def handle_data(data):
try:
if data['type'] == 'ask':
response_dict = handle_ask(data)
elif data['type'] == 'bid':
response_dict = handle_bid(data)
elif data['type'] == 'greeting':
response_dict = handle_greeting(data)
elif data['type'] == 'trade':
response_dict = handle_trade(data)
elif data['type'] == 'confirm':
response_dict = handle_confirm(data)
elif data['type'] == 'cancel':
response_dict = handle_cancel(data)
return json.dumps(response_dict), data['type']
except ValueError, e:
print e.message
return e.message
def handle_ask(ask):
bid = match_incoming_ask(ask)
if bid:
return trade_offer(ask, bid)
else:
offers.append(ask)
return 'Your ask got processed!'
def handle_bid(bid):
ask = match_incoming_bid(bid)
if ask:
return trade_offer(bid, ask)
else:
offers.append(bid)
return "Your bid got processed!"
def handle_trade(trade):
offer = get_offer(id=get_public_bytestring(), message_id=trade['trade-id'])
if offer:
return create_confirm(recipient=trade['id'], trade_id=trade['trade-id'])
else:
return create_cancel(recipient=trade['id'], trade_id=trade['trade-id'])
def handle_confirm(confirm):
return 'Trade succesful!'
def handle_cancel(cancel):
return 'Trade cancelled'
def handle_greeting(greeting):
return 'Hi!'
reactor.listenMulticast(8005, UdpReceive("listener1"), listenMultiple=True)
# reactor.listenMulticast(8005, UdpSender("listener2"), listenMultiple=True)
reactor.run()
|
Python
| 0 |
@@ -307,16 +307,30 @@
t_offer,
+ remove_offer,
%0A
@@ -3165,29 +3165,23 @@
-offer = get_offer(id=
+id, trade_id =
get_
@@ -3200,16 +3200,63 @@
tring(),
+ trade%5B'trade-id'%5D%0A offer = get_offer(id=id,
message
@@ -3260,36 +3260,27 @@
age_id=trade
-%5B'trade-id'%5D
+_id
)%0A if off
@@ -3283,16 +3283,65 @@
offer:%0A
+ remove_offer(id=id, message_id=trade_id)%0A
@@ -3399,28 +3399,19 @@
id=trade
-%5B'trade-id'%5D
+_id
)%0A el
@@ -3480,28 +3480,19 @@
id=trade
-%5B'trade-id'%5D
+_id
)%0A%0A%0Adef
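Decoded, handle_trade now confirms only when the referenced offer is still in our book, removing it before confirming so it cannot be traded twice; otherwise it cancels. Reconstructed result (the tuple assignment shadows the id builtin, exactly as in the diff):

from orderbook import (match_incoming_ask, match_incoming_bid,
                       get_bids, get_asks, get_own_bids, get_own_asks,
                       trades, offers, get_offer, remove_offer,
                       trade_offer, create_confirm, create_cancel)

def handle_trade(trade):
    id, trade_id = get_public_bytestring(), trade['trade-id']
    offer = get_offer(id=id, message_id=trade_id)
    if offer:
        remove_offer(id=id, message_id=trade_id)
        return create_confirm(recipient=trade['id'], trade_id=trade_id)
    else:
        return create_cancel(recipient=trade['id'], trade_id=trade_id)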
|
cc3ca68df357572767280bdddf332cfd430e9203
|
Enhance the test to avoid celery internal queues in rabbitmq status.
|
oneflow/base/utils/stats/rabbitmq.py
|
oneflow/base/utils/stats/rabbitmq.py
|
# -*- coding: utf-8 -*-
u"""
Copyright 2012-2014 Olivier Cortès <[email protected]>.
This file is part of the 1flow project.
1flow is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, either version 3 of
the License, or (at your option) any later version.
1flow is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public
License along with 1flow. If not, see http://www.gnu.org/licenses/
"""
import logging
import pyrabbit
from django.conf import settings
# from sparks.fabric import is_localhost
from oneflow.base.utils.http import split_url
LOGGER = logging.getLogger(__name__)
# AMQP_RE = re.compile(ur'amqp://(?P<username>[^:]+):(?P<password>\w+)@(?P<hostname_and_port>[^/]+)/(?P<vhost>[^/]+)', re.I) # NOQA
def get_rabbitmq_client_args_from_broker_url():
""" Decompose BROKER_URL into a tuple suitable for rabbitmq.Client(). """
proto, host_and_port, vhost = split_url(settings.BROKER_URL)
things = host_and_port.rsplit('@', 1)
if len(things) > 1:
username, password = things[0].split(':', 1)
host_and_port = things[1]
else:
username, password = 'guest', 'guest'
if not vhost:
vhost = '/'
host_and_port = host_and_port.replace(':5672', ':55672')
return [host_and_port, username, password, vhost]
if settings.BROKER_URL.lower().startswith('amqp://'):
rabbitmq_params = get_rabbitmq_client_args_from_broker_url()
rabbitmq_client = pyrabbit.Client(*rabbitmq_params[:-1])
try:
rabbitmq_client.is_alive()
except:
rabbitmq_params[0] = rabbitmq_params[0].replace(':55672', ':15672')
rabbitmq_client = pyrabbit.Client(*rabbitmq_params[:-1])
rabbitmq_vhost = rabbitmq_params[-1]
else:
rabbitmq_client = None
def rabbitmq_queues():
""" Return rabbitMQ client get_queues() result, or {}.
``{}`` is when RabbitMQ is not available, eg. ``BROKER_URL`` doesn't
start with ``amqp://``.
"""
if rabbitmq_client is None:
return {}
try:
queues = rabbitmq_client.get_queues(rabbitmq_vhost)
except:
LOGGER.exception(u'Could not connect to RabbitMQ API. '
u'Is the web interface plugin enabled?')
return {}
return [q for q in sorted(queues, key=lambda q: q['name'])
if not (q['name'].startswith('amq.gen')
or q['name'].startswith('celery'))]
|
Python
| 0 |
@@ -2561,16 +2561,25 @@
return %5B
+%0A
q for q
@@ -2633,20 +2633,16 @@
-
if not (
q%5B'n
@@ -2637,16 +2637,29 @@
if not (
+%0A
q%5B'name'
@@ -2698,48 +2698,45 @@
- or q%5B'name'%5D.startswith('celery'))
+or 'celery' in q%5B'name'%5D%0A )%0A
%5D%0A
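Decoded, the list comprehension is reflowed and the celery test is broadened from a prefix match to a substring match, so any queue with 'celery' anywhere in its name is excluded alongside the amq.gen ones:

    return [
        q for q in sorted(queues, key=lambda q: q['name'])
        if not (
            q['name'].startswith('amq.gen')
            or 'celery' in q['name']
        )
    ]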
|
a7894b858c31bbd7561b2e20ec43f5268f676f56
|
Remove TEMPLATE_ options from example-local.
|
onlineweb4/settings/example-local.py
|
onlineweb4/settings/example-local.py
|
import os
import sys
from base import PROJECT_ROOT_DIRECTORY
# Prevent python from making .pyc files
sys.dont_write_bytecode = True
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# Change this to the host in production
ALLOWED_HOSTS = '*'
DATABASES = {
#'default': {
#'ENGINE': 'django.db.backends.postgresql_psycopg2',
#'NAME': 'django',
#'USER': 'django',
#'PASSWORD': 'django',
#'HOST': '127.0.0.1',
#'PORT': '',
#},
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': 'db.db',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
},
}
# Email settings
# If you are actually sending mail, this should be replaced with an
# email adress you can get all mail to.
DEVELOPMENT_EMAIL = 'your_preferred_adress_here'
# Overwriting all the emails from base.py with the development email, so that
# all mail gets sent to the developer(s) instead of their actual targets.
# These variables should be used throughout the project instead of the actual
# adresses, so we can safely redirect all mail away from the live systems when
# running tests.
DEFAULT_FROM_EMAIL = '[email protected]'
EMAIL_ARRKOM = DEVELOPMENT_EMAIL
EMAIL_BEDKOM = DEVELOPMENT_EMAIL
EMAIL_DOTKOM = DEVELOPMENT_EMAIL
EMAIL_EKSKOM = DEVELOPMENT_EMAIL
EMAIL_FAGKOM = DEVELOPMENT_EMAIL
EMAIL_PROKOM = DEVELOPMENT_EMAIL
EMAIL_TRIKOM = DEVELOPMENT_EMAIL
#EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend' # real
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' # prints
# GOOGLE_ANALYTICS_KEY = 'UA-XXXX-Y'
#MEDIA_ROOT = '/var/websites/prod/onlineweb_uploads'
MEDIA_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, "uploaded_media/")
#MEDIA_URL = '//media.online.ntnu.no/'
MEDIA_URL = '/media/'
#MEDIA_ROOT = '/var/websites/prod/static'
STATIC_ROOT = os.path.join(PROJECT_ROOT_DIRECTORY, 'collected_static')
#STATIC_URL = '//static.online.ntnu.no'
STATIC_URL = '/static/'
#Url of default profile picture
DEFAULT_PROFILE_PICTURE_URL = os.path.join(STATIC_URL, "img", "profile_default.png")
# Filebrowser local settings.
FILEBROWSER_MEDIA_ROOT = MEDIA_ROOT
# If you use django extensions that should not be used in production
# add them here.
# INSTALLED_APPS += (
# 'apps.example',
# 'debug_toolbar', # https://github.com/dcramer/django-debug-toolbar
# 'django_extensions', # http://packages.python.org/django-extensions/
# )
GENFORS_ADMIN_PASSWORD = 'ADMIN_PASSWORD'
SYMPA_DB_PASSWD = ''
SYMPA_DB_USER = ''
SYMPA_DB_NAME = ''
SYMPA_DB_PORT = ''
SYMPA_DB_HOST = ''
# Variables for group syncing script
#GROUP_SYNCER = [
# {
# 'name': 'Komite-enkeltgrupper til gruppen Komiteer',
# 'source': [
# 1, # Group ID 1
# 2, # Group ID 2
# ],
# 'destination': [
# 3 # Group ID 3
# ]
# }
#]
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'standard': {
'format': '%(asctime)s [%(levelname)s] %(name)s: %(message)s'
},
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
},
'console':{
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'standard'
},
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'syncer': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': True,
},
}
}
# Online stripe keys.
# For development replace with https://online.ntnu.no/wiki/komiteer/dotkom/aktuelt/onlineweb4/keys/
# For production login to Stripe
STRIPE_PUBLIC_KEYS = [
"pk_test_replace_this", # arrKom
"pk_test_replace_this" # proKom
]
STRIPE_PRIVATE_KEYS = [
"sk_test_replace_this", # arrKom
"sk_test_replace_this" # proKom
]
# Google reCaptcha settings
# Keys are found here: https://online.ntnu.no/wiki/komiteer/dotkom/aktuelt/onlineweb4/keys/
RECAPTCHA_PUBLIC_KEY = 'replace this'
RECAPTCHA_PRIVATE_KEY = 'replace this'
NOCAPTCHA = True
RECAPTCHA_USE_SSL = True
|
Python
| 0 |
@@ -145,31 +145,8 @@
True
-%0ATEMPLATE_DEBUG = DEBUG
%0A%0A#
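Only a deletion: the file now runs straight from sys.dont_write_bytecode into DEBUG = True. TEMPLATE_DEBUG was deprecated in Django 1.8 and removed in 1.10; if a template debug flag is still wanted, the modern equivalent lives in the TEMPLATES setting, roughly as below (a sketch, not part of this commit, assuming the default Django template backend):

TEMPLATES = [{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'OPTIONS': {'debug': DEBUG},
}]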
|
9c04063c033a1f707b6befdc66280a426fba78e3
|
Add asterisk mark for the required field on CreateNamespaceForm
|
openstack_dashboard/dashboards/admin/metadata_defs/forms.py
|
openstack_dashboard/dashboards/admin/metadata_defs/forms.py
|
#
# (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Forms for managing metadata.
"""
import json
from django.forms import ValidationError
from django.urls import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import glance
from openstack_dashboard.dashboards.admin.metadata_defs \
import constants
class CreateNamespaceForm(forms.SelfHandlingForm):
source_type = forms.ChoiceField(
label=_('Namespace Definition Source'),
required=False,
choices=[('file', _('Metadata Definition File')),
('raw', _('Direct Input'))],
widget=forms.ThemableSelectWidget(
attrs={'class': 'switchable', 'data-slug': 'source'}))
metadef_file = forms.FileField(
label=_("Metadata Definition File"),
help_text=_("A local metadata definition file to upload."),
widget=forms.FileInput(
attrs={'class': 'switched', 'data-switch-on': 'source',
'data-source-file': _('Metadata Definition File')}),
required=False)
direct_input = forms.CharField(
label=_('Namespace JSON'),
help_text=_('The JSON formatted contents of a namespace.'),
widget=forms.widgets.Textarea(
attrs={'class': 'switched', 'data-switch-on': 'source',
'data-source-raw': _('Namespace JSON')}),
required=False)
public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def clean(self):
data = super(CreateNamespaceForm, self).clean()
# The key can be missing based on particular upload
# conditions. Code defensively for it here...
metadef_file = data.get('metadef_file', None)
metadata_raw = data.get('direct_input', None)
if metadata_raw and metadef_file:
raise ValidationError(
_("Cannot specify both file and direct input."))
if not metadata_raw and not metadef_file:
raise ValidationError(
_("No input was provided for the namespace content."))
try:
if metadef_file:
ns_str = self.files['metadef_file'].read()
else:
ns_str = data['direct_input']
namespace = json.loads(ns_str)
if data['public']:
namespace['visibility'] = 'public'
else:
namespace['visibility'] = 'private'
namespace['protected'] = data['protected']
for protected_prop in constants.METADEFS_PROTECTED_PROPS:
namespace.pop(protected_prop, None)
data['namespace'] = namespace
except Exception as e:
msg = _('There was a problem loading the namespace: %s.') % e
raise forms.ValidationError(msg)
return data
def handle(self, request, data):
try:
namespace = glance.metadefs_namespace_create(request,
data['namespace'])
messages.success(request,
_('Namespace %s has been created.') %
namespace['namespace'])
return namespace
except Exception as e:
msg = _('Unable to create new namespace. %s')
msg %= e.message.split('Failed validating', 1)[0]
exceptions.handle(request, message=msg)
return False
class ManageResourceTypesForm(forms.SelfHandlingForm):
def handle(self, request, context):
namespace_name = self.initial['id']
current_names = self.get_names(self.initial['resource_types'])
try:
updated_types = json.loads(self.data['resource_types'])
selected_types = [updated_type for updated_type in updated_types
if updated_type.pop('selected', False)]
for current_name in current_names:
glance.metadefs_namespace_remove_resource_type(
self.request, namespace_name, current_name)
for selected_type in selected_types:
selected_type.pop('$$hashKey', None)
selected_type.pop('created_at', None)
selected_type.pop('updated_at', None)
glance.metadefs_namespace_add_resource_type(
self.request, namespace_name, selected_type)
msg = _('Resource types updated for namespace %s.')
msg %= namespace_name
messages.success(request, msg)
except Exception:
msg = _('Error updating resource types for namespace %s.')
msg %= namespace_name
exceptions.handle(request, msg)
return False
return True
def get_names(self, items):
return [item['name'] for item in items]
class UpdateNamespaceForm(forms.SelfHandlingForm):
public = forms.BooleanField(label=_("Public"), required=False)
protected = forms.BooleanField(label=_("Protected"), required=False)
def handle(self, request, data):
try:
params = {
'visibility': 'public' if data['public'] else 'private',
'protected': data['protected']
}
glance.metadefs_namespace_update(request,
self.initial['namespace_id'],
**params)
msg = _('Namespace successfully updated.')
messages.success(request, msg)
except Exception:
msg = _('Error updating attributes for namespace.')
redirect = reverse(constants.METADATA_INDEX_URL)
exceptions.handle(request, msg, redirect=redirect)
return False
return True
|
Python
| 0.000001 |
@@ -1167,32 +1167,8 @@
'),%0A
- required=False,%0A
@@ -1619,32 +1619,87 @@
-on': 'source',%0A
+ 'data-required-when-shown': 'true',%0A
@@ -2017,32 +2017,87 @@
-on': 'source',%0A
+ 'data-required-when-shown': 'true',%0A
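Decoded, the change drops required=False from source_type (making the ChoiceField required, which is what draws the asterisk mark) and tags both conditional inputs with a data-required-when-shown hook for the frontend. A sketch of one of the two widgets, context lines assumed unchanged; the same attrs entry is added to the direct_input Textarea:

    metadef_file = forms.FileField(
        label=_("Metadata Definition File"),
        help_text=_("A local metadata definition file to upload."),
        widget=forms.FileInput(
            attrs={'class': 'switched', 'data-switch-on': 'source',
                   'data-required-when-shown': 'true',
                   'data-source-file': _('Metadata Definition File')}),
        required=False)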
|
c93ab0a8030cd3819a4278d29d2976a4a0a42fdb
|
Fix LDAPURL's tests
|
tests/test_ldapurl.py
|
tests/test_ldapurl.py
|
import pytest
from bonsai import LDAPURL
from bonsai import LDAPDN
from bonsai.errors import InvalidDN
@pytest.fixture
def valid_ldapurl():
""" Get a valid LDAPURL. """
strurl = "ldaps://testurl:444/cn=test,dc=test?sn,gn?base?(objectclass=*)?1.2.3.4"
return LDAPURL(strurl)
def test_get_address(valid_ldapurl):
""" Test get_address method. """
ldapi_url = LDAPURL("ldapi://%2Ftmp%2Fldapi")
assert valid_ldapurl.get_address() == "ldaps://testurl:444"
assert ldapi_url.get_address() == "ldapi://%2Ftmp%2Fldapi"
def test_get_host_properties(valid_ldapurl):
""" Test getting LDAPURL host properties. """
ldapi_url = LDAPURL("ldapi://%2Ftmp%2Fldapi")
assert valid_ldapurl.scheme == "ldaps"
assert valid_ldapurl.host == "testurl"
assert valid_ldapurl.port == 444
assert ldapi_url.scheme == "ldapi"
assert ldapi_url.port == 0
def test_set_host_properties():
""" Test setting LDAPURL host properties. """
url = LDAPURL()
with pytest.raises(ValueError):
url.host = ":malformed,@äđĐ-"
with pytest.raises(ValueError):
url.port = "9922"
with pytest.raises(ValueError):
url.scheme = "http"
url.host = "testurl2"
url.port = 589
url.scheme = "ldap"
assert url.scheme == "ldap"
assert url.host == "testurl2"
assert url.port == 589
def test_get_bind_properties(valid_ldapurl):
""" Test getting LDAPURL bind properties. """
assert valid_ldapurl.basedn == LDAPDN("cn=test,dc=test")
assert valid_ldapurl.scope == "base"
assert valid_ldapurl.filter == "(objectclass=*)"
assert valid_ldapurl.attributes == ["sn", "gn"]
def test_set_bind_properties():
""" Test setting LDAPURL bind properties. """
url = LDAPURL()
with pytest.raises(InvalidDN):
url.basedn = "test"
url.basedn = LDAPDN("cn=test")
assert str(url.basedn) == "cn=test"
def test_str(valid_ldapurl):
""" Test __str__ method of LDAPURL. """
assert (
str(valid_ldapurl)
== "ldaps://testurl:444/cn=test,dc=test?sn,gn?base?(objectclass=*)?1.2.3.4"
)
assert str(LDAPURL("ldap://127.0.0.1/cn=x?cn")) == "ldap://127.0.0.1:389/cn=x?cn"
assert str(LDAPURL("ldap:///")) == "ldap://localhost:389"
assert str(LDAPURL("ldapi:///")) == "ldapi://localhost"
assert not LDAPURL("ldap:///") == "http://localhost:389"
assert "<LDAPURL" in repr(valid_ldapurl)
def test_conversion():
""" Test ValueError exception for invalid URL format. """
with pytest.raises(ValueError):
_ = LDAPURL("ldap://failed.com/?falsedn?d")
def test_del_attr(valid_ldapurl):
""" Test trying to delete an attribute. """
with pytest.raises(AttributeError):
del valid_ldapurl.host
try:
_ = valid_ldapurl.host
except AttributeError:
pytest.fail("Attribute not should be deleted.")
def test_invalid():
""" Test invalid LDAP URLs. """
with pytest.raises(ValueError):
_ = LDAPURL("http://localhost")
with pytest.raises(ValueError):
_ = LDAPURL("ldaps://localost.")
def test_scope():
""" Test scope and scope_num property. """
url = LDAPURL("ldap:///??one")
assert url.scope_num == 1
url.scope = "base"
assert url.scope_num == 0
with pytest.raises(TypeError):
url.scope = 2.1
with pytest.raises(ValueError):
url.scope = "all"
def test_ipv6():
""" Test IPv6 address """
url = LDAPURL(
"ldap://[2001:db8:85a3::8a2e:370:7334]:1498/"
"o=University%20of%20Michigan,c=US??one?"
"(cn=Babs%20Jensen)"
)
assert url.host == "2001:db8:85a3::8a2e:370:7334"
assert url.port == 1498
assert url.scope == "one"
assert url.filter == "(cn=Babs Jensen)"
addr = url.get_address()
assert addr == "ldap://[2001:db8:85a3::8a2e:370:7334]:1498"
with pytest.raises(ValueError):
_ = LDAPURL("ldap://2001::85::37:7334")
|
Python
| 0.000006 |
@@ -1568,24 +1568,28 @@
apurl.filter
+_exp
== %22(object
@@ -3723,16 +3723,20 @@
l.filter
+_exp
== %22(cn
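A four-character rename per hunk: the property under test is filter_exp rather than filter, presumably to avoid shadowing the filter builtin. The two asserts become:

    assert valid_ldapurl.filter_exp == "(objectclass=*)"
    # and in test_ipv6:
    assert url.filter_exp == "(cn=Babs Jensen)"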
|
d9001013a070176756f49166552b7dbb5fb6aeb0
|
Fix plugin rendering in tests
|
tests/test_plugins.py
|
tests/test_plugins.py
|
# -*- coding: utf-8 -*-
import pytest
from cms.api import add_plugin
from cms.models import Placeholder
from cmsplugin_articles_ai.cms_plugins import ArticleList, TagFilterArticleList, TagList
from cmsplugin_articles_ai.factories import PublicArticleFactory, TagFactory
def create_articles(amount):
for _ in range(amount):
PublicArticleFactory()
def init_plugin(plugin_type, lang="en", **plugin_data):
"""
Creates a plugin attached into a placeholder
Returns an instance of plugin_type
"""
placeholder = Placeholder.objects.create(slot="test")
return add_plugin(placeholder, plugin_type, lang, **plugin_data)
@pytest.mark.django_db
def test_article_list_plugin_article_count():
"""
Test article list plugin inserts correct amount of articles into
the context. Amount is should be same as defined in plugin settings.
"""
article_count = 10
create_articles(article_count)
plugin = init_plugin(ArticleList, article_amount=3)
plugin_instance = plugin.get_plugin_class_instance()
context = plugin_instance.render({}, plugin, None)
assert len(context["articles"]) == 3
@pytest.mark.django_db
@pytest.mark.parametrize("language_filter", ["", "en", "fi"])
def test_article_list_plugin_language_filter(language_filter):
"""
Test article list plugin filters articles according to language filter
"""
article_fi = PublicArticleFactory(language="fi")
article_en = PublicArticleFactory(language="en")
plugin = init_plugin(ArticleList, language_filter=language_filter)
plugin_instance = plugin.get_plugin_class_instance()
context = plugin_instance.render({}, plugin, None)
if language_filter == "en":
assert article_fi not in context["articles"]
assert article_en in context["articles"]
elif language_filter == "fi":
assert article_fi in context["articles"]
assert article_en not in context["articles"]
else:
assert article_fi in context["articles"]
assert article_en in context["articles"]
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_article_list_plugin_html():
"""
Test article list plugin rendering works and html has
relevant content.
"""
plugin = init_plugin(ArticleList)
article = PublicArticleFactory()
html = plugin.render_plugin({})
assert article.title in html
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_tag_article_list_plugin_html():
"""
Test article list plugin rendering works and html has
relevant content.
"""
tag = TagFactory()
article = PublicArticleFactory(tags=[tag])
plugin = init_plugin(TagFilterArticleList)
plugin.tags.add(tag)
html = plugin.render_plugin({})
assert article.title in html
@pytest.mark.urls("cmsplugin_articles_ai.article_urls")
@pytest.mark.django_db
def test_tag_list_plugin_html():
"""
Test tag list plugin rendering works and html has
relevant content.
"""
plugin = init_plugin(TagList)
tag = TagFactory()
html = plugin.render_plugin({})
assert tag.name in html
|
Python
| 0 |
@@ -97,16 +97,65 @@
eholder%0A
+from cms.plugin_rendering import ContentRenderer%0A
from cms
@@ -404,16 +404,225 @@
ory()%0A%0A%0A
+def init_content_renderer(request=None):%0A %22%22%22%0A Create and return %60ContentRenderer%60 instance initiated with request.%0A Request may be %60None%60 in some cases.%0A %22%22%22%0A return ContentRenderer(request)%0A%0A%0A
def init
@@ -2601,38 +2601,136 @@
-html = plugin.render_plugin(%7B%7D
+renderer = init_content_renderer()%0A html = renderer.render_plugin(instance=plugin, context=%7B%7D, placeholder=plugin.placeholder
)%0A
@@ -3120,16 +3120,55 @@
dd(tag)%0A
+ renderer = init_content_renderer()%0A
html
@@ -3162,38 +3162,40 @@
er()%0A html =
-plugin
+renderer
.render_plugin(%7B
@@ -3185,34 +3185,91 @@
r.render_plugin(
-%7B%7D
+instance=plugin, context=%7B%7D, placeholder=plugin.placeholder
)%0A assert art
@@ -3546,24 +3546,63 @@
agFactory()%0A
+ renderer = init_content_renderer()%0A
html = p
@@ -3600,22 +3600,24 @@
html =
-plugin
+renderer
.render_
@@ -3623,18 +3623,75 @@
_plugin(
-%7B%7D
+instance=plugin, context=%7B%7D, placeholder=plugin.placeholder
)%0A as
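Decoded, the tests now render through cms.plugin_rendering.ContentRenderer rather than calling render_plugin on the plugin model directly; the helper and the rewritten call sites read:

from cms.plugin_rendering import ContentRenderer

def init_content_renderer(request=None):
    """
    Create and return `ContentRenderer` instance initiated with request.
    Request may be `None` in some cases.
    """
    return ContentRenderer(request)

# in each *_html test the old one-liner
#     html = plugin.render_plugin({})
# becomes
renderer = init_content_renderer()
html = renderer.render_plugin(instance=plugin, context={}, placeholder=plugin.placeholder)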
|
f928cd15b6ac04e03e569694c57f43110ce04b0a
|
Add ignoretz to FeedEntryItemLoader in case the supplied timezone is bogus.
|
feeds/loaders.py
|
feeds/loaders.py
|
import html
import os
import re
from copy import deepcopy
from datetime import datetime
import dateparser
import lxml
from dateutil.parser import parse as dateutil_parse
from dateutil.tz import gettz
from lxml import etree
from lxml.cssselect import CSSSelector
from lxml.html import HtmlComment
from scrapy.loader import ItemLoader
from scrapy.loader.processors import Compose, Identity, Join, MapCompose, TakeFirst
from w3lib.html import remove_tags
from feeds.items import FeedEntryItem, FeedItem
def parse_datetime(date_time, loader_context):
if isinstance(date_time, datetime):
return date_time
elif isinstance(date_time, str):
try:
return dateutil_parse(
date_time.strip(),
dayfirst=loader_context.get("dayfirst", False),
yearfirst=loader_context.get("yearfirst", True),
)
except ValueError:
# If dateutil can't parse it, it might be a human-readable date.
return dateparser.parse(date_time)
else:
raise ValueError("date_time must be datetime or a str.")
def apply_timezone(date_time, loader_context):
if not date_time.tzinfo:
# If date_time object is not aware, apply timezone from loader_context.
# In case a timezone is not supplied, just assume UTC.
date_time = date_time.replace(
tzinfo=gettz(loader_context.get("timezone", "UTC"))
)
return date_time
def replace_regex(text, loader_context):
for pattern, repl in loader_context.get("replace_regex", {}).items():
text = re.sub(pattern, repl, text)
return text
def build_tree(text, loader_context):
base_url = loader_context.get("base_url", None)
tree = lxml.html.fragment_fromstring(text, create_parent="div", base_url=base_url)
# Workaround for https://bugs.launchpad.net/lxml/+bug/1576598.
# FIXME: Remove this when a workaround is released.
tree.getroottree().docinfo.URL = base_url
# Scrapy expects an iterator which it unpacks and feeds to the next
# function in the pipeline. trees are iterators but we don't want to them
# to get unpacked so we wrap the tree in another iterator.
return [tree]
def serialize_tree(tree, in_make_links=False):
return lxml.html.tostring(tree, encoding="unicode")
def make_links_absolute(tree):
if tree.base_url:
# Make references in tags like <a> and <img> absolute.
tree.make_links_absolute(handle_failures="ignore")
return [tree]
def cleanup_html(tree, loader_context):
for elem_child, elem_parent in loader_context.get("child_to_parent", {}).items():
sel_child = CSSSelector(elem_child)
sel_parent = CSSSelector(elem_parent)
for e_parent in sel_parent(tree):
e_children = sel_child(e_parent)
if e_children:
e_parent.getparent().replace(e_parent, e_children[0])
for elem_sel, elem_new in loader_context.get("replace_elems", {}).items():
elem_new = lxml.html.fragment_fromstring(elem_new)
selector = CSSSelector(elem_sel)
for elem in selector(tree):
# New element could be replaced more than once but every node must be a
# different element.
elem.getparent().replace(elem, deepcopy(elem_new))
# Remove tags.
for elem_sel in loader_context.get("remove_elems", []):
selector = CSSSelector(elem_sel)
for elem in selector(tree):
elem.getparent().remove(elem)
for elem_sel in loader_context.get("remove_elems_xpath", []):
for elem in tree.xpath(elem_sel):
elem.getparent().remove(elem)
# Change tag names.
for elem_sel, elem_tag in loader_context.get("change_tags", {}).items():
selector = CSSSelector(elem_sel)
for elem in selector(tree):
elem.tag = elem_tag
# tree.iter() iterates over the tree including the root node.
for elem in tree.iter():
# Remove HTML comments.
if isinstance(elem, HtmlComment):
elem.getparent().remove(elem)
# Remove class and id attribute from all elements which are not needed
# in the feed.
elem.attrib.pop("class", None)
elem.attrib.pop("id", None)
# Delete data- attributes that have no general meaning.
for attrib in list(elem.attrib.keys()):
if attrib.startswith("data-"):
elem.attrib.pop(attrib)
return [tree]
def convert_footnotes(tree, loader_context):
footnotes = []
# Convert footnotes.
for elem_sel in loader_context.get("convert_footnotes", []):
selector = CSSSelector(elem_sel)
for elem in selector(tree):
footnotes.append(elem.text_content())
ref = etree.Element("span")
ref.text = " [{}]".format(len(footnotes))
elem.getparent().replace(elem, ref)
# Add new <div> with all the footnotes, one per <p>
if footnotes:
footnotes_elem = etree.Element("div")
tree.append(footnotes_elem)
for i, footnote in enumerate(footnotes):
footnote_elem = etree.Element("p")
footnote_elem.text = "[{}] {}".format(i + 1, footnote)
footnotes_elem.append(footnote_elem)
return [tree]
def skip_empty_tree(tree):
if tree.text:
# Has a text.
return [tree]
if len(tree):
# Has children.
return [tree]
return None
def skip_false(value):
"""
Skip values that evaluate to False.
Scrapy only skips values that are None by default. In feeds we want to
tighten that policy and also skip empty strings, False and everything else
that evaluates to False.
"""
if value:
return value
return None
class BaseItemLoader(ItemLoader):
# Defaults
# Unescape twice to get rid of &&xxx; encoding errors.
default_input_processor = MapCompose(
skip_false, str.strip, html.unescape, html.unescape
)
default_output_processor = TakeFirst()
# Join first two elements on ": " and the rest on " - ".
title_out = Compose(lambda t: [": ".join(t[:2])] + t[2:], Join(" - "))
updated_in = MapCompose(skip_false, parse_datetime, apply_timezone)
author_name_out = Join(", ")
# Optional
path_out = Join(os.sep)
class FeedItemLoader(BaseItemLoader):
default_item_class = FeedItem
class FeedEntryItemLoader(BaseItemLoader):
default_item_class = FeedEntryItem
# Field specific
content_text_in = MapCompose(skip_false, str.strip, remove_tags)
content_text_out = Join("\n")
content_html_in = MapCompose(
skip_false,
replace_regex,
build_tree,
convert_footnotes,
cleanup_html,
skip_empty_tree,
make_links_absolute,
serialize_tree,
)
content_html_out = Join()
category_out = Identity()
# Site specific loaders
class CbirdFeedEntryItemLoader(FeedEntryItemLoader):
content_html_out = Join()
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 smartindent autoindent
|
Python
| 0 |
@@ -854,24 +854,88 @@
st%22, True),%0A
+ ignoretz=loader_context.get(%22ignoretz%22, False),%0A
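Decoded, parse_datetime gains an opt-in ignoretz flag read from the loader context; with ignoretz=True a bogus timezone in the feed is discarded, after which apply_timezone falls back to the configured zone (or UTC):

            return dateutil_parse(
                date_time.strip(),
                dayfirst=loader_context.get("dayfirst", False),
                yearfirst=loader_context.get("yearfirst", True),
                ignoretz=loader_context.get("ignoretz", False),
            )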
|
dba62be0d8b87a66b415460a07f453536209b08e
|
Change user API prefix from users/ to user/
|
dubdubdub/api_urls.py
|
dubdubdub/api_urls.py
|
from django.conf.urls import patterns, url
from django.views.decorators.cache import cache_page
from schools.api_views import SchoolsList, SchoolsInfo, SchoolInfo, Districts, \
SchoolsDiseInfo, SchoolDemographics, SchoolProgrammes, SchoolFinance, \
Blocks, Clusters, BlocksInsideDistrict, ClustersInsideDistrict, ClustersInsideBlock, \
DistrictOfSchool, BlockOfSchool, ClusterOfSchool, PincodeOfSchool, AssemblyOfSchool, \
ParliamentOfSchool
from users.api_views import TestAuthenticatedView
urlpatterns = patterns('',
# Caches the results of the url for 60 seconds
#url(r'^schools/list', cache_page(60)(SchoolsList.as_view()), name='api_schools_list'),
url(r'^$', 'schools.api_views.api_root', name='api_root'),
url(r'^schools/list$', SchoolsList.as_view(), name='api_schools_list'),
url(r'^schools/info$', SchoolsInfo.as_view(), name='api_schools_info'),
url(r'^schools/dise/(?P<year>[0-9\-]*)$', SchoolsDiseInfo.as_view(), name='api_schools_dise'),
url(r'^schools/school/(?P<pk>[0-9]+)$', SchoolInfo.as_view(), name='api_school_info'),
url(r'^schools/school/(?P<pk>[0-9]+)/demographics$', SchoolDemographics.as_view(), name='api_school_demo'),
url(r'^schools/school/(?P<pk>[0-9]+)/programmes$', SchoolProgrammes.as_view(), name='api_school_prog'),
url(r'^schools/school/(?P<pk>[0-9]+)/finance$', SchoolFinance.as_view(), name='api_school_finance'),
url(r'^boundary/districts$', Districts.as_view(), name="api_districts"),
url(r'^boundary/districts/(?P<id>[0-9]+)/blocks$', BlocksInsideDistrict.as_view(), name="api_districts_block"),
url(r'^boundary/districts/(?P<id>[0-9]+)/clusters$', ClustersInsideDistrict.as_view(), name="api_districts_cluster"),
url(r'^boundary/blocks$', Blocks.as_view(), name="api_blocks"),
url(r'^boundary/blocks/(?P<id>[0-9]+)/clusters$', ClustersInsideBlock.as_view(), name="api_blocks_clusters"),
url(r'^boundary/clusters$', Clusters.as_view(), name="api_clusters"),
url(r'^geo/district/(?P<pk>[0-9]+)$', DistrictOfSchool.as_view(), name="api_school_district"),
url(r'^geo/block/(?P<pk>[0-9]+)$', BlockOfSchool.as_view(), name="api_school_block"),
url(r'^geo/cluster/(?P<pk>[0-9]+)$', ClusterOfSchool.as_view(), name="api_school_cluster"),
url(r'^geo/pincode/(?P<pk>[0-9]+)$', PincodeOfSchool.as_view(), name="api_school_pincode"),
url(r'^geo/assembly/(?P<pk>[0-9]+)$', AssemblyOfSchool.as_view(), name="api_school_assembly"),
url(r'^geo/parliament/(?P<pk>[0-9]+)$', ParliamentOfSchool.as_view(), name="api_school_parliament"),
url('^users/signup$', 'users.api_views.signup', name='api_signup'),
url('^users/signin$', 'users.api_views.signin', name='api_signin'),
url('^users/signout$', 'users.api_views.signout', name='api_signout'),
url('^users/test_authenticated', TestAuthenticatedView.as_view(), name='api_test_authenticated'),
)
|
Python
| 0 |
@@ -2578,25 +2578,24 @@
url('%5Euser
-s
/signup$', '
@@ -2649,25 +2649,24 @@
url('%5Euser
-s
/signin$', '
@@ -2724,17 +2724,16 @@
l('%5Euser
-s
/signout
@@ -2798,17 +2798,16 @@
l('%5Euser
-s
/test_au
|
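Decoded (%5E is `^`), the four hunks drop only the trailing `s` from each URL prefix; the `users.api_views` import paths are untouched:
url('^user/signup$', 'users.api_views.signup', name='api_signup'),
url('^user/signin$', 'users.api_views.signin', name='api_signin'),
url('^user/signout$', 'users.api_views.signout', name='api_signout'),
url('^user/test_authenticated', TestAuthenticatedView.as_view(), name='api_test_authenticated'),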
88d56e2857f09223175e9f845aebb496c143d08b
|
check for gl errors in sampler tests
|
tests/test_sampler.py
|
tests/test_sampler.py
|
import unittest
import moderngl
from common import get_context
class TestCase(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.ctx = get_context()
def test_attributes(self):
sampler = self.ctx.sampler()
# Default values
self.assertEqual(sampler.anisotropy, 1.0)
self.assertTrue(sampler.repeat_x)
self.assertTrue(sampler.repeat_y)
self.assertTrue(sampler.repeat_z)
self.assertEqual(sampler.filter, (moderngl.LINEAR, moderngl.LINEAR))
self.assertEqual(sampler.compare_func, '?')
self.assertEqual(sampler.border_color, (0.0, 0.0, 0.0, 0.0))
# Change values
sampler.anisotropy = self.ctx.max_anisotropy
sampler.filter = (moderngl.NEAREST_MIPMAP_NEAREST, moderngl.NEAREST)
sampler.compare_func = "<="
self.assertEqual(sampler.anisotropy, self.ctx.max_anisotropy)
self.assertEqual(sampler.filter, (moderngl.NEAREST_MIPMAP_NEAREST, moderngl.NEAREST))
self.assertEqual(sampler.compare_func, "<=")
# Ensure repeat parameters are set correctly
sampler.repeat_x = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, True, True))
sampler.repeat_y = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, False, True))
sampler.repeat_z = False
self.assertEqual((sampler.repeat_x, sampler.repeat_y, sampler.repeat_z), (False, False, False))
# Ensure border color values are set correctly
colors = [
(1.0, 0.0, 0.0, 0.0),
(0.0, 1.0, 0.0, 0.0),
(0.0, 0.0, 1.0, 0.0),
(0.0, 0.0, 0.0, 1.0),
]
for color in colors:
sampler.border_color = color
self.assertEqual(sampler.border_color, color)
# LOD
self.assertEqual(sampler.min_lod, -1000.0)
self.assertEqual(sampler.max_lod, 1000.0)
sampler.min_lod = 0.0
self.assertEqual(sampler.min_lod, 0.0)
sampler.max_lod = 500.0
self.assertEqual(sampler.max_lod, 500.0)
def test_clear_samplers(self):
self.ctx.clear_samplers(start=0, end=5)
self.ctx.clear_samplers(start=5, end=10)
self.ctx.clear_samplers(start=10, end=100)
|
Python
| 0 |
@@ -59,16 +59,243 @@
ontext%0A%0A
+def checkerror(func):%0A def wrapper(*args, **kwargs):%0A _ = get_context().error%0A func(*args, **kwargs)%0A err = get_context().error%0A assert err == 'GL_NO_ERROR', %22Error: %25s%22 %25 err%0A return wrapper%0A%0A
%0Aclass T
@@ -406,27 +406,39 @@
-def test_attributes
+@checkerror%0A def test_create
(sel
@@ -482,33 +482,156 @@
r()%0A
-%0A # Default values
+ sampler.use(location=0)%0A sampler.clear(location=0)%0A%0A @checkerror%0A def test_defaults(self):%0A sampler = self.ctx.sampler()
%0A
@@ -1001,16 +1001,203 @@
, 0.0))%0A
+ self.assertEqual(sampler.min_lod, -1000.0)%0A self.assertEqual(sampler.max_lod, 1000.0)%0A%0A @checkerror%0A def test_prop_changes(self):%0A sampler = self.ctx.sampler()
%0A
@@ -2060,16 +2060,103 @@
alse))%0A%0A
+ @checkerror%0A def test_border_color(self):%0A sampler = self.ctx.sampler()%0A%0A
@@ -2504,117 +2504,79 @@
- # LOD%0A self.assertEqual(sampler.min_lod, -1000.0)%0A self.assertEqual(sampler.max_lod, 1000.0
+@checkerror%0A def test_lod(self):%0A sampler = self.ctx.sampler(
)%0A%0A
@@ -2730,24 +2730,40 @@
od, 500.0)%0A%0A
+ @checkerror%0A
def test
|
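The first hunk above decodes verbatim to the decorator below; it reads the context's error flag once to drain anything left over from earlier tests, runs the test body, then asserts that no new GL error was raised (`get_context` comes from the suite's `common` module):
def checkerror(func):
    def wrapper(*args, **kwargs):
        _ = get_context().error  # querying the flag also clears leftover errors
        func(*args, **kwargs)
        err = get_context().error
        assert err == 'GL_NO_ERROR', "Error: %s" % err
    return wrapper
The remaining hunks split `test_attributes` into `test_create`, `test_defaults`, `test_prop_changes`, `test_border_color`, and `test_lod`, each tagged with `@checkerror`.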
77d99751bdb55d25eeaff94c40e292ee8e56cbf6
|
Add test for wrong number of time constants
|
tests/test_spiking.py
|
tests/test_spiking.py
|
from neurons import spiking
__author__ = 'johannes'
import pytest
import numpy as np
class TestVarious:
def test_neuron_no_spike(self):
# Neuron should not spike
timesteps = 20
# Two neurons
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
# Neuron 1 is connected to Neuron 2 with weight 1
weights = np.array([[0, 1], [0, 0]])
# Empty spiketrain of length 'timesteps'
spiketrain = np.zeros((2, timesteps), dtype=bool)
current = spiking_model.simulate(spiketrain, weights, 19)
# The resulting current on both neurons should be zero
assert np.array_equal(current, np.array([0, 0]))
def test_negative_weight(self):
# If weights are negative enough, neuron should not spike
# The same code as test_neuron_spike but with negative weight!
timesteps = 20
# Two neurons
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
# Neuron 1 is connected to Neuron 2 with weight -1
weights = np.array([[0, -1], [0, 0]])
# Empty spiketrain of length 'timesteps'
spiketrain = np.zeros((2, timesteps), dtype=bool)
# Neuron 1 Spikes all the time :)
spiketrain[0,:] = 1
current = spiking_model.simulate(spiketrain, weights, 19)
# The resulting current on Neuron 1 should be 0
# on Neuron 2 it should be negative
assert current[0] == 0
assert current[1] < 0
def test_neuron_spike(self):
# Neuron should spike
timesteps = 20
# Two neurons
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
# Neuron 1 is connected to Neuron 2 with weight 1
weights = np.array([[0, 1], [0, 0]], dtype=bool)
# Empty spiketrain of length 'timesteps'
spiketrain = np.zeros((2, timesteps))
# Neuron 1 Spikes all the time :)
spiketrain[0,:] = 1
current = spiking_model.simulate(spiketrain, weights, 19)
# The resulting current on Neuron 1 should be 0
# on Neuron 2 it should be positive
assert current[0] == 0
assert current[1] > 0
def test_different_time_constants(self):
# Each neuron has different time constants
pass
class TestShouldFail:
def test_wrong_spiketrain_size(self):
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
# Empty spiketrain is too short
spiketrain1 = np.zeros((2, 20))
# Neuron 1 is connected to Neuron 2 with weight 1
weights = np.array([[0, 1], [0, 0]], dtype=bool)
with pytest.raises(ValueError) as e:
current = spiking_model.simulate(spiketrain1, weights, 20)
assert "Spiketrain too short (0ms -- 19ms) for simulating time 20" in str(e.value)
def test_simulate_wrong_types(self):
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
spiketrain1 = np.zeros((2, 21))
weights = np.array([[0, 1], [0, 0]], dtype=bool)
# Spiketrain is not a numpy array
with pytest.raises(ValueError) as e:
current = spiking_model.simulate([0,0,0], weights, 20)
# Weights is not a matrix
with pytest.raises(ValueError) as e:
current = spiking_model.simulate(spiketrain1, [[0,1],[0,0]], 20)
# Time is not an int
with pytest.raises(ValueError) as e:
current = spiking_model.simulate(spiketrain1, weights, [20, 13])
assert "Variable t should be int or convertible to int" in str(e.value)
def test_wrong_weight_size(self):
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
spiketrain1 = np.zeros((2, 21))
# Wrong weights
weights = np.array([[0, 1], [0, 0], [0, 0]], dtype=bool)
with pytest.raises(ValueError) as e:
current = spiking_model.simulate(spiketrain1, weights, 20)
assert "Weigths should be a quadratic matrix" in str(e.value)
def test_wrong_time_too_small(self):
# Simulate a time that is too small
spiking_model = spiking.SRM(neurons=2, threshold=1.0, t_current=0.3,
t_membrane=20, eta_reset=5, verbose=False)
spiketrain1 = np.zeros((2, 20))
weights = np.array([[0, 1], [0, 0]], dtype=bool)
with pytest.raises(ValueError) as e:
current = spiking_model.simulate(spiketrain1, weights, -1)
assert "Time to be simulated is too small" in str(e.value)
# FIXME
@pytest.mark.xfail
def test_wrong_number_of_constants(self):
# 3 Neurons, 3 different t_s, but only 2 different t_m
with pytest.raises(ValueError) as e:
spiking_model = spiking.SRM(neurons=3, threshold=1.0, t_current=[0.3, 0.2, 0.3],
t_membrane=[0.2, 0.5], eta_reset=[0.5, 0.5, 0.6], verbose=False)
|
Python
| 0.000271 |
@@ -5056,43 +5056,8 @@
e)%0A%0A
- # FIXME%0A @pytest.mark.xfail%0A
|
aeb2aaa106d7b37e0c9a3fc8a71364d79b00346d
|
Remove some debugging output from a migration
|
uk_results/migrations/0030_populate_postresult_post_election.py
|
uk_results/migrations/0030_populate_postresult_post_election.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from collections import defaultdict
from django.db import migrations, models
def set_post_election_from_post(apps, schema_editor):
"""
This is far from ideal. Try to guess the PostExtraElection
that this PostResult relates to. This will have to be done by looking
and the related memberships and assuming they're correct (sometimes they
won't be, and that will have to be fixed manually later).
"""
PostResult = apps.get_model('uk_results', 'PostResult')
PostExtraElection = apps.get_model('candidates', 'PostExtraElection')
print(PostResult.objects.all().count())
qs = PostResult.objects.all().select_related('post__extra')
for post_result in qs:
pee = None
elections = post_result.post.extra.elections.all()
if not elections.exists():
raise ValueError("Post with no elections found.")
if elections.count() == 1:
# This is an easy case – this post only has one known election
pee = PostExtraElection.objects.get(
election=elections.first(),
postextra=post_result.post.extra
)
post_result.post_election = pee
post_result.save()
else:
if not post_result.result_sets.exists():
# There are no results sets for this post_result
# so we can just delete it
post_result.delete()
continue
result_sets_by_election = defaultdict(list)
# Work out how many elections we have results for.
# If it's only 1, then use that one
for result_set in post_result.result_sets.all():
for candidate_result in result_set.candidate_results.all():
this_election = candidate_result.membership.extra.election
result_sets_by_election[this_election].append(result_set)
if len(set(result_sets_by_election.keys())) == 1:
election = result_sets_by_election.keys()[0]
pee = PostExtraElection.objects.get(
election=election,
postextra=post_result.post.extra
)
post_result.post_election = pee
post_result.save()
else:
# We have results for more than one election, but only
# a single PostResult object.
# Split the result_sets up in to a new PostResult per election
for election, result_sets in result_sets_by_election.items():
result_sets = set(result_sets)
pee = PostExtraElection.objects.get(
election=election,
postextra=post_result.post.extra
)
pr = PostResult.objects.create(
post_election=pee,
post=post_result.post,
confirmed=post_result.confirmed,
confirmed_resultset=post_result.confirmed_resultset
)
for result_set in result_sets:
result_set.post_result = pr
result_set.save()
post_result.delete()
class Migration(migrations.Migration):
dependencies = [
('uk_results', '0029_add_postresult_post_election'),
]
operations = [
migrations.RunPython(set_post_election_from_post),
]
|
Python
| 0.000004 |
@@ -622,52 +622,8 @@
')%0A%0A
- print(PostResult.objects.all().count())%0A
|
8fd65190a2a68a7afeab91b0a02c83309f72ccd6
|
Add tests to gen_test for generator, seems to work
|
tests/test_testing.py
|
tests/test_testing.py
|
import greenado
from greenado.testing import gen_test
from tornado.testing import AsyncTestCase
from tornado import gen
@gen.coroutine
def coroutine():
raise gen.Return(1234)
class GreenadoTests(AsyncTestCase):
@gen_test
def test_without_timeout(self):
assert greenado.gyield(coroutine()) == 1234
@gen_test(timeout=5)
def test_with_timeout(self):
assert greenado.gyield(coroutine()) == 1234
|
Python
| 0 |
@@ -253,32 +253,33 @@
_without_timeout
+1
(self):%0A
@@ -318,24 +318,151 @@
()) == 1234%0A
+ %0A @gen_test%0A @greenado.generator%0A def test_without_timeout2(self):%0A assert (yield coroutine()) == 1234%0A
%0A @ge
@@ -504,16 +504,17 @@
_timeout
+1
(self):%0A
@@ -549,29 +549,164 @@
ld(coroutine()) == 1234%0A
+ %0A @gen_test(timeout=5)%0A @greenado.generator%0A def test_with_timeout2(self):%0A assert (yield coroutine()) == 1234%0A
%0A
|
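Decoded, the diff renames the existing tests with a `1` suffix and adds generator-style twins that yield the coroutine directly instead of calling `greenado.gyield`:
@gen_test
@greenado.generator
def test_without_timeout2(self):
    assert (yield coroutine()) == 1234
@gen_test(timeout=5)
@greenado.generator
def test_with_timeout2(self):
    assert (yield coroutine()) == 1234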
0d313502b8b5d850109b48cde8d3dea2dae0d802
|
Clean up __init__.py .
|
vcr/__init__.py
|
vcr/__init__.py
|
import logging
from .config import VCR
# Set default logging handler to avoid "No handler found" warnings.
import logging
try: # Python 2.7+
from logging import NullHandler
except ImportError:
class NullHandler(logging.Handler):
def emit(self, record):
pass
logging.getLogger(__name__).addHandler(NullHandler())
default_vcr = VCR()
def use_cassette(path, **kwargs):
return default_vcr.use_cassette(path, **kwargs)
|
Python
| 0.000021 |
@@ -101,31 +101,16 @@
rnings.%0A
-import logging%0A
try: #
@@ -267,16 +267,17 @@
pass%0A%0A
+%0A
logging.
@@ -323,16 +323,17 @@
ler())%0A%0A
+%0A
default_
@@ -348,14 +348,8 @@
R()%0A
-%0A%0Adef
use_
@@ -360,36 +360,10 @@
ette
-(path, **kwargs):%0A return
+ =
def
@@ -387,21 +387,5 @@
ette
-(path, **kwargs)
%0A
|
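Decoded, the first hunk drops the duplicated `import logging`, and the final three hunks collapse the pass-through wrapper into a plain module-level alias, which is equivalent since the wrapper forwarded all its arguments unchanged:
use_cassette = default_vcr.use_cassette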
98bf3d45c7f9cef77273e977d28cc9eaeaa95905
|
fix import for state daily integration test
|
integrations/acquisition/covid_hosp/state_daily/test_scenarios.py
|
integrations/acquisition/covid_hosp/state_daily/test_scenarios.py
|
"""Integration tests for acquisition of COVID hospitalization."""
# standard library
import unittest
from unittest.mock import MagicMock
from unittest.mock import patch
# third party
import requests
# first party
from delphi.epidata.acquisition.covid_hosp.common.database import Database
from delphi.epidata.acquisition.covid_hosp.common.test_utils import TestUtils
from delphi.epidata.client.delphi_epidata import Epidata
from delphi.epidata.acquisition.covid_hosp.state_daily.update import Update
import delphi.operations.secrets as secrets
# py3tester coverage target (equivalent to `import *`)
__test_target__ = \
'delphi.epidata.acquisition.covid_hosp.state_daily.update'
class AcquisitionTests(unittest.TestCase):
def setUp(self):
"""Perform per-test setup."""
# configure test data
self.test_utils = TestUtils(__file__)
# use the local instance of the Epidata API
Epidata.BASE_URL = 'http://delphi_web_epidata/epidata/api.php'
# use the local instance of the epidata database
secrets.db.host = 'delphi_database_epidata'
secrets.db.epi = ('user', 'pass')
# clear relevant tables
with Database.connect() as db:
with db.new_cursor() as cur:
cur.execute('truncate table covid_hosp_state_timeseries')
cur.execute('truncate table covid_hosp_meta')
def test_acquire_dataset(self):
"""Acquire a new dataset."""
# make sure the data does not yet exist
with self.subTest(name='no data yet'):
response = Epidata.covid_hosp('MA', Epidata.range(20200101, 20210101))
self.assertEqual(response['result'], -2)
# acquire sample data into local database
# mock out network calls to external hosts
with self.subTest(name='first acquisition'), \
patch.object(requests, 'get', side_effect=
[MagicMock(json=lambda: self.test_utils.load_sample_metadata())] +
list(self.test_utils.load_sample_revisions())) as mock_requests_get, \
patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:
acquired = Update.run()
self.assertTrue(acquired)
self.assertEqual(mock_requests_get.call_count, 6)
# make sure the data now exists
with self.subTest(name='initial data checks'):
response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))
self.assertEqual(response['result'], 1)
self.assertEqual(len(response['epidata']), 1)
row = response['epidata'][0]
self.assertEqual(row['state'], 'WY')
self.assertEqual(row['date'], 20201209)
self.assertEqual(row['issue'], 20201213)
self.assertEqual(row['critical_staffing_shortage_today_yes'], 8)
actual = row['inpatient_bed_covid_utilization']
expected = 0.11729857819905214
self.assertAlmostEqual(actual, expected)
self.assertIsNone(row['critical_staffing_shortage_today_no'])
# expect 61 fields per row (63 database columns, except `id` and `record_type`)
self.assertEqual(len(row), 61)
# re-acquisition of the same dataset should be a no-op
with self.subTest(name='second acquisition'), \
patch.object(requests, 'get', side_effect=
[MagicMock(json=lambda: self.test_utils.load_sample_metadata())] +
list(self.test_utils.load_sample_revisions())) as mock_requests_get, \
patch.object(Network, 'fetch_dataset', return_value=self.test_utils.load_sample_dataset()) as mock_fetch:
acquired = Update.run()
self.assertFalse(acquired)
# make sure the data still exists
with self.subTest(name='final data checks'):
response = Epidata.covid_hosp('WY', Epidata.range(20200101, 20210101))
self.assertEqual(response['result'], 1)
self.assertEqual(len(response['epidata']), 1)
|
Python
| 0 |
@@ -495,16 +495,94 @@
Update%0A
+from delphi.epidata.acquisition.covid_hosp.state_daily.network import Network%0A
import d
|
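Decoded, the hunk adds the one import the test body already depends on: both `with` blocks patch `Network.fetch_dataset`, so without it the module fails with a `NameError` at test time.
from delphi.epidata.acquisition.covid_hosp.state_daily.network import Network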
83a7db27cd72bfc3699d56dfe8d2f988e3f8642e
|
fix typo
|
easylinker/parsers.py
|
easylinker/parsers.py
|
#-*- coding: utf-8 -*-
#from __future__ import unicode_literals
#from __future__ import print_function
from jinja2 import Template
import re
from config import PREDEFINED_VARIABLE_TABLE
from links import Link, LinkException
import platform
class ParserException(Exception):
pass
def to_unicode(val):
if type(val) == unicode:
return val
elif type(val) == str:
try:
return val.decode('utf-8')
except UnicodeDecodeError:
return val.decode('euc-kr')
else:
raise AssertionError('not valid type')
class VariableConverter(object):
def __init__(self, var_table):
self.var_table = var_table
def run(self, text):
template = Template(
text,
)
output = template.render(**self.var_table)
return output
class LineInfo(object):
def __init__(self, success, src, dst, platform):
self.success = success
self.src = src
self.dst = dst
self.platform = platform
def is_independent(self):
return self.platform == None
def is_win(self):
if self.palatform == None:
return False
WIN_PLATFORM_LIST = ['windows', 'win']
return self.platform.lower() in WIN_PLATFORM_LIST
def is_osx(self):
if self.palatform == None:
return False
OSX_PLATFORM_LIST = ['darwin', 'osx']
return self.platform.lower() in OSX_PLATFORM_LIST
def is_linux(self):
if self.palatform == None:
return False
LINUX_PLATFORM_LIST = ['linux']
return self.platform.lower() in LINUX_PLATFORM_LIST
@classmethod
def invalid_line(cls):
return LineInfo(False, None, None, None)
class LineParser(object):
SIMPLE_PROG = re.compile(r'(?P<src>.+)->(?P<dst>.+)')
PLATFORM_PROG = re.compile(r'(?P<platform>.+):(?P<src>.+)->(?P<dst>.+)')
PROG_LIST = [
PLATFORM_PROG,
SIMPLE_PROG,
]
def parse_simple(self, line):
m = self.SIMPLE_PROG.match(line)
if m:
src = m.group('src').strip()
dst = m.group('dst').strip()
return LineInfo(True, src, dst, None)
else:
return LineInfo.invalid_line()
def parse_platform(self, line):
m = self.PLATFORM_PROG.match(line)
if m:
src = m.group('src').strip()
dst = m.group('dst').strip()
platform = m.group('platform').strip()
return LineInfo(True, src, dst, platform)
else:
return LineInfo.invalid_line()
def parse(self, line):
line = line.strip()
if not line:
return LineInfo.invalid_line()
if line[0] == '#':
return LineInfo.invalid_line()
line_info = None
for prog in self.PROG_LIST:
if prog == self.PLATFORM_PROG:
line_info = self.parse_platform(line)
elif prog == self.SIMPLE_PROG:
line_info = self.parse_simple(line)
if line_info.success:
break
if not line_info.success:
raise ParserException('Not valid line [{}]'.format(line))
if not line_info.src:
raise ParserException('Empty src : [{}]'.format(line))
if not line_info.dst:
raise ParserException('Empty dst : [{}]'.format(line))
return line_info
def run(filename):
with open(filename, 'rb') as f:
content = f.read()
content = to_unicode(content)
assert type(content) == unicode
var_converter = VariableConverter(PREDEFINED_VARIABLE_TABLE)
content = var_converter.run(content)
line_list = content.splitlines()
curr_platform = platform.system().lower()
line_parser = LineParser()
for line in line_list:
line_info = line_parser.parse(line)
if not line_info.success:
continue
if line_info.is_win() and curr_platform != 'windows':
continue
if line_info.is_linux() and curr_platform != 'linux':
continue
if line_info.is_osx() and curr_platform != 'darwin':
continue
link = Link(line_info.src, line_info.dst)
try:
link.create()
except LinkException as e:
print(e.message)
|
Python
| 0.999991 |
@@ -1109,33 +1109,32 @@
if self.p
-a
latform == None:
@@ -1126,32 +1126,32 @@
atform == None:%0A
+
retu
@@ -1296,33 +1296,32 @@
if self.p
-a
latform == None:
@@ -1467,32 +1467,32 @@
is_linux(self):%0A
+
if self.
@@ -1492,17 +1492,16 @@
f self.p
-a
latform
|
e353bae122c6e55da022d73c42d7eee09a558b44
|
clean code
|
bin/visual_dl.py
|
bin/visual_dl.py
|
""" entry point of visual_dl
"""
import json
import os
import sys
from optparse import OptionParser
from flask import Flask, redirect
from flask import send_from_directory
from visualdl.log import logger
app = Flask(__name__, static_url_path="")
def option_parser():
"""
:return:
"""
parser = OptionParser(usage="usage: visual_dl visual_dl.py "\
"-p port [options]")
parser.add_option(
"-p",
"--port",
default=8040,
action="store",
dest="port",
help="rest api service port")
return parser.parse_args()
# return data
# status, msg, data
def gen_result(status, msg):
"""
:param status:
:param msg:
:return:
"""
result = dict()
result['status'] = status
result['msg'] = msg
result['data'] = {}
return result
server_path = os.path.abspath(os.path.dirname(sys.argv[0]))
static_file_path = "../visualdl/frontend/dist/"
@app.route('/static/<path:filename>')
def serve_static(filename):
print("aaa")
return send_from_directory(os.path.join(server_path, static_file_path), filename)
@app.route("/")
def index():
return redirect('/static/index.html', code=302)
@app.route('/hello')
def hello():
result = gen_result(0, "Hello, this is VisualDL!")
return json.dumps(result)
if __name__ == '__main__':
options, args = option_parser()
logger.info(" port=" + str(options.port))
app.run(debug=False, host="0.0.0.0", port=options.port)
|
Python
| 0.000008 |
@@ -1027,25 +1027,8 @@
e):%0A
- print(%22aaa%22)%0A
|
3c72aa1266f1008552a3979ac057251bf2f93053
|
Bump tensorflow in /training/xgboost/structured/base (#212)
|
training/xgboost/structured/base/setup.py
|
training/xgboost/structured/base/setup.py
|
#!/usr/bin/env python
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from setuptools import find_packages
from setuptools import setup
# While this is an xgboost sample, we will still require tensorflow and
# scikit-learn to be installed, since the sample uses certain functionalities
# available in those libraries:
# tensorflow: mainly to copy files seamlessly to GCS
# scikit-learn: the helper functions it provides, e.g. splitting datasets
REQUIRED_PACKAGES = [
'tensorflow==1.15.2',
'scikit-learn==0.20.2',
'pandas==0.24.2',
'xgboost==0.81',
'cloudml-hypertune',
]
setup(
name='trainer',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True,
description='AI Platform | Training | xgboost | Base'
)
|
Python
| 0.000545 |
@@ -1099,17 +1099,17 @@
w==1.15.
-2
+4
',%0A '
|
4b4736cc77ce429e8dbd562b7d68b5d00027a534
|
Disable periodically failing Bok Choy test
|
common/test/acceptance/tests/test_studio_split_test.py
|
common/test/acceptance/tests/test_studio_split_test.py
|
"""
Acceptance tests for Studio related to the split_test module.
"""
from ..fixtures.course import CourseFixture, XBlockFixtureDesc
from ..pages.studio.component_editor import ComponentEditorView
from test_studio_container import ContainerBase
from ..pages.studio.utils import add_advanced_component
from xmodule.partitions.partitions import Group, UserPartition
from bok_choy.promise import Promise
class SplitTest(ContainerBase):
"""
Tests for creating and editing split test instances in Studio.
"""
__test__ = True
def setup_fixtures(self):
course_fix = CourseFixture(
self.course_info['org'],
self.course_info['number'],
self.course_info['run'],
self.course_info['display_name']
)
course_fix.add_advanced_settings(
{
u"advanced_modules": ["split_test"],
u"user_partitions": [
UserPartition(0, 'Configuration alpha,beta', 'first', [Group("0", 'alpha'), Group("1", 'beta')]).to_json(),
UserPartition(1, 'Configuration 0,1,2', 'second', [Group("0", 'Group 0'), Group("1", 'Group 1'), Group("2", 'Group 2')]).to_json()
]
}
)
course_fix.add_children(
XBlockFixtureDesc('chapter', 'Test Section').add_children(
XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
XBlockFixtureDesc('vertical', 'Test Unit')
)
)
).install()
self.course_fix = course_fix
self.user = course_fix.user
def verify_groups(self, container, active_groups, inactive_groups, verify_missing_groups_not_present=True):
"""
Check that the groups appear and are correctly categorized as to active and inactive.
Also checks that the "add missing groups" button/link is not present unless a value of False is passed
for verify_missing_groups_not_present.
"""
def wait_for_xblocks_to_render():
# First xblock is the container for the page, subtract 1.
return (len(active_groups) + len(inactive_groups) == len(container.xblocks) - 1, len(active_groups))
Promise(wait_for_xblocks_to_render, "Number of xblocks on the page are incorrect").fulfill()
def check_xblock_names(expected_groups, actual_blocks):
self.assertEqual(len(expected_groups), len(actual_blocks))
for idx, expected in enumerate(expected_groups):
self.assertEqual('Expand or Collapse\n{}'.format(expected), actual_blocks[idx].name)
check_xblock_names(active_groups, container.active_xblocks)
check_xblock_names(inactive_groups, container.inactive_xblocks)
# Verify inactive xblocks appear after active xblocks
check_xblock_names(active_groups + inactive_groups, container.xblocks[1:])
if verify_missing_groups_not_present:
self.verify_add_missing_groups_button_not_present(container)
def verify_add_missing_groups_button_not_present(self, container):
"""
Checks that the "add missing groups" button/link is not present.
"""
def missing_groups_button_not_present():
button_present = container.missing_groups_button_present()
return (not button_present, not button_present)
Promise(missing_groups_button_not_present, "Add missing groups button should not be showing.").fulfill()
def create_poorly_configured_split_instance(self):
"""
Creates a split test instance with a missing group and an inactive group.
Returns the container page.
"""
unit = self.go_to_unit_page(make_draft=True)
add_advanced_component(unit, 0, 'split_test')
container = self.go_to_container_page()
container.edit()
component_editor = ComponentEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration alpha,beta')
self.course_fix.add_advanced_settings(
{
u"user_partitions": [
UserPartition(0, 'Configuration alpha,beta', 'first',
[Group("0", 'alpha'), Group("2", 'gamma')]).to_json()
]
}
)
self.course_fix._add_advanced_settings()
return self.go_to_container_page()
def test_create_and_select_group_configuration(self):
"""
Tests creating a split test instance on the unit page, and then
assigning the group configuration.
"""
unit = self.go_to_unit_page(make_draft=True)
add_advanced_component(unit, 0, 'split_test')
container = self.go_to_container_page()
container.edit()
component_editor = ComponentEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration alpha,beta')
self.verify_groups(container, ['alpha', 'beta'], [])
# Switch to the other group configuration. Must navigate again to the container page so
# that there is only a single "editor" on the page.
container = self.go_to_container_page()
container.edit()
component_editor = ComponentEditorView(self.browser, container.locator)
component_editor.set_select_value_and_save('Group Configuration', 'Configuration 0,1,2')
self.verify_groups(container, ['Group 0', 'Group 1', 'Group 2'], ['alpha', 'beta'])
# Reload the page to make sure the groups were persisted.
container = self.go_to_container_page()
self.verify_groups(container, ['Group 0', 'Group 1', 'Group 2'], ['alpha', 'beta'])
def test_missing_group(self):
"""
The case of a split test with invalid configuration (missing group).
"""
container = self.create_poorly_configured_split_instance()
container.add_missing_groups()
self.verify_groups(container, ['alpha', 'gamma'], ['beta'])
# Reload the page to make sure the groups were persisted.
container = self.go_to_container_page()
self.verify_groups(container, ['alpha', 'gamma'], ['beta'])
def test_delete_inactive_group(self):
"""
Test deleting an inactive group.
"""
container = self.create_poorly_configured_split_instance()
container.delete(0)
self.verify_groups(container, ['alpha'], [], verify_missing_groups_not_present=False)
|
Python
| 0 |
@@ -64,16 +64,43 @@
e.%0A%22%22%22%0A%0A
+from unittest import skip%0A%0A
from ..f
@@ -5802,32 +5802,129 @@
pha', 'beta'%5D)%0A%0A
+ @skip(%22This fails periodically where it fails to trigger the add missing groups action.Dis%22)%0A
def test_mis
|
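Decoded, the diff imports `skip` from `unittest` and tags `test_missing_group` with it; the reason string really does end in a truncated "Dis" in the source hunk, preserved verbatim below:
from unittest import skip
@skip("This fails periodically where it fails to trigger the add missing groups action.Dis")
def test_missing_group(self):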
0ec3bfbd91e6e967bb2baae0307e76aafbb5aa91
|
Simplify the base types
|
blackjax/base.py
|
blackjax/base.py
|
from typing import Callable, NamedTuple, Tuple
from typing_extensions import Protocol
from .types import PRNGKey, PyTree
Position = PyTree
State = NamedTuple
Info = NamedTuple
class InitFn(Protocol):
"""A `Callable` used to initialize the kernel state.
Sampling algorithms often need to carry over some informations between
steps, often to avoid computing the same quantity twice. Therefore the
kernels do not operate on the chain positions themselves, but on states that
contain this position and other information.
The `InitFn` returns the state corresponding to a chain position. This state
can then be passed to the `update` function of the `SamplingAlgorithm`.
"""
def __call__(self, position: Position) -> State:
"""The initialization function.
Parameters
----------
position
A chain position.
Returns
-------
The kernel state that corresponds to the position.
"""
class Kernel:
"""A transition kernel used as the `update` of a `SamplingAlgorithms`.
Kernels are pure functions and are idempotent. They necessarily take a
random state `rng_key` and the current kernel state (which contains the
current position) as parameters, return a new state and some information
about the transition.
"""
def __call__(self, rng_key: PRNGKey, state: State) -> Tuple[State, Info]:
"""The transition kernel.
Parameters
----------
rng_key:
The random state used by JAX's random numbers generator.
state:
The current kernel state. The kernel state contains the current
chain position as well as other information the kernel needs to
carry over from the previous step.
Returns
-------
A new state, as well as a NamedTuple that contains extra information
about the transition that does not need to be carried over to the next
step.
"""
class SamplingAlgorithm(NamedTuple):
"""A pair of functions that implement a sampling algorithm.
Blackjax sampling algorithms are implemented as a pair of pure functions: a
kernel that generates a new sample starting from the current state, and an
initialization function that creates a kernel state from a chain position.
As they represent Markov kernels, the kernel functions are pure functions
and do not have internal state. To save computation time they also operate
on states which contain the chain state and additional information that
needs to be carried over for the next step.
Attributes
---------
init:
A pure function which when called with the initial position and the
target density probability function will return the kernel's initial
state.
step:
A pure function that takes a rng key, a state and possibly some
parameters and returns a new state and some information about the
transition.
"""
init: InitFn
step: Kernel
class SamplingAlgorithmGenerator(NamedTuple):
"""A pair of functions that implement a kenel generator.
This is meant to be a quick fix until we can pass the values of parameters
directly to the step function.
"""
init: InitFn
kernel: Callable
|
Python
| 0.002563 |
@@ -15,18 +15,8 @@
port
- Callable,
Nam
@@ -3054,276 +3054,4 @@
nel%0A
-%0A%0Aclass SamplingAlgorithmGenerator(NamedTuple):%0A %22%22%22A pair of functions that implement a kenel generator.%0A%0A This is meant to be a quick fix until we can pass the values of parameters%0A directly to the step function.%0A%0A %22%22%22%0A%0A init: InitFn%0A kernel: Callable%0A
|
f1b22cfcca8470a59a7bab261bbd2a46a7c2a2ed
|
Fix unicode issues at url translation
|
socib_cms/cmsutils/utils.py
|
socib_cms/cmsutils/utils.py
|
# coding: utf-8
import re
from django.core.urlresolvers import reverse
from django.conf import settings
def reverse_no_i18n(viewname, *args, **kwargs):
result = reverse(viewname, *args, **kwargs)
m = re.match(r'(/[^/]*)(/.*$)', result)
return m.groups()[1]
def change_url_language(url, language):
if hasattr(settings, 'LANGUAGES'):
languages = [lang[0] for lang in settings.LANGUAGES]
m = re.match(r'/([^/]*)(/.*$)', url)
if m and m.groups()[0] in languages:
return "/{lang}{url}".format(
lang=language,
url=m.groups()[1])
return "/{lang}{url}".format(
lang=language,
url=url)
return url
|
Python
| 0.00022 |
@@ -507,32 +507,33 @@
return
+u
%22/%7Blang%7D%7Burl%7D%22.f
@@ -617,16 +617,17 @@
return
+u
%22/%7Blang%7D
|
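Decoded, both hunks simply prefix the format strings with `u` so that on Python 2 the rebuilt URL is always `unicode`, even when `language` or `url` arrive as byte strings:
return u"/{lang}{url}".format(
    lang=language,
    url=m.groups()[1])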
24a0f128250b285b1f6a810c68bbea78c510fc62
|
remove debug logging
|
DTBitmap2D.py
|
DTBitmap2D.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image
import numpy as np
class _DTBitmap2D(object):
"""docstring for DTBitmap2D"""
CHANNEL_NAMES = ("red", "green", "blue", "alpha", "gray")
def __init__(self):
super(_DTBitmap2D, self).__init__()
print "__init__", type(self)
self.grid = (0, 0, 1, 1)
for n in _DTBitmap2D.CHANNEL_NAMES:
setattr(self, n, None)
def dt_type(self):
return "2D Bitmap"
def dtype(self):
for x in _DTBitmap2D.CHANNEL_NAMES:
v = getattr(self, x)
if v != None:
return v.dtype
return None
def dt_write(self, datafile, name):
suffix = "16" if self.dtype() in (np.uint16, np.int16) else ""
for channel_name in _DTBitmap2D.CHANNEL_NAMES:
values = getattr(self, channel_name)
if values != None:
channel_name = channel_name.capitalize() + suffix
datafile.write_anonymous(values, "_".join((name, channel_name)))
datafile.write_anonymous(self.grid, name)
class _DTGDALBitmap2D(_DTBitmap2D):
"""docstring for DTGDALBitmap2D"""
def __init__(self, image_path):
super(_DTGDALBitmap2D, self).__init__()
from osgeo import gdal
from osgeo.gdalconst import GA_ReadOnly
dataset = gdal.Open(str(image_path), GA_ReadOnly)
(xmin, dx, rot1, ymax, rot2, dy) = dataset.GetGeoTransform()
mesh = dataset.ReadAsArray()
ymin = ymax + dy * dataset.RasterYSize
self.grid = (xmin, ymin, dx, abs(dy))
# e.g., (3, 900, 1440) for an RGB
if len(mesh.shape) == 3:
channel_count = mesh.shape[0]
name_map = {}
if channel_count == 2:
# Gray + Alpha
name_map = {0:"gray", 1:"alpha"}
elif channel_count == 3:
# RGB (tested with screenshot)
name_map = {0:"red", 1:"green", 2:"blue"}
elif channel_count == 4:
# RGBA
name_map = {0:"red", 1:"green", 2:"blue", 3:"alpha"}
for idx in name_map:
channel = np.flipud(mesh[idx,:])
setattr(self, name_map[idx], channel)
elif len(mesh.shape) == 2:
# Gray (tested with int16)
self.gray = np.flipud(mesh)
del dataset
def _array_from_image(image):
"""Convert a PIL image to a numpy ndarray.
Arguments:
image -- a PIL image instance
Returns:
a numpy ndarray or None if an error occurred
"""
array = None
if image.mode.startswith(("I", "F")):
def _parse_mode(mode):
# Modes aren't very well documented, and I see results that
# differ from the documentation. They seem to follow this:
# http://www.pythonware.com/library/pil/handbook/decoder.htm
suffix = mode.split(";")[-1]
np_type = ""
if suffix != mode:
mode_size = ""
mode_fmt = ""
for c in suffix:
if c.isdigit():
mode_size += c
else:
mode_fmt += c
if mode_fmt.startswith("N") is False:
# big-endian if starts with B, little otherwise
np_type += ">" if mode_fmt.startswith("B") else "<"
if mode_fmt.endswith("S"):
# signed int
np_type += "i"
else:
# float or unsigned int
np_type += "f" if mode.endswith("F") else "u"
# convert to size in bytes
np_type += str(int(mode_size) / 8)
elif mode == "F":
np_type = "f4"
elif mode == "I":
np_type = "i4"
else:
return None
return np.dtype(np_type)
dt = _parse_mode(image.mode)
if dt is None:
print "unable to determine image bit depth and byte order for mode \"%s\"" % (image.mode)
else:
try:
# fails for signed int16 images produced by GDAL, but works with unsigned
array = np.fromstring(image.tostring(), dtype=dt)
array = array.reshape((image.size[1], image.size[0]))
except Exception, e:
print "image.tostring() failed for image with mode \"%s\" (PIL error: %s)" % (image.mode, str(e))
else:
# doesn't seem to work reliably for GDAL-produced 16 bit GeoTIFF
array = np.asarray(image)
return array
class _DTPILBitmap2D(_DTBitmap2D):
"""docstring for DTPILBitmap2D"""
def __init__(self, image_or_path):
super(_DTPILBitmap2D, self).__init__()
image = Image.open(image_or_path) if isinstance(image_or_path, basestring) else image_or_path
array = _array_from_image(image)
assert array is not None, "unable to convert the image to a numpy array"
assert array.dtype in (np.int16, np.uint16, np.uint8, np.int8, np.bool), "unsupported bit depth"
if image.mode in ("1", "P", "L", "LA") or image.mode.startswith(("F", "I")):
# Convert binary image of dtype=bool to uint8, although this is probably
# a better candidate for use as a mask.
if image.mode == "1":
print "warning: converting binary image to uint8"
# TODO: this crashes when I test it with a binary TIFF, but it looks like a
# bug in numpy or PIL. Strangely, it doesn't crash if I copy immediately after
# calling asarray above.
array = array.copy().astype(np.uint8)
array *= 255
if image.mode in ("1", "L", "P") or image.mode.startswith(("F", "I")):
self.gray = np.flipud(array)
else:
assert image.mode == "LA", "requires gray + alpha image"
self.gray = np.flipud(array[:,0])
self.alpha = np.flipud(array[:,1])
elif image.mode in ("RGB", "RGBA"):
self.red = np.flipud(array[:,:,0])
self.green = np.flipud(array[:,:,1])
self.blue = np.flipud(array[:,:,2])
if image.mode == "RGBA":
self.alpha = np.flipud(array[:,:,3])
del image
def DTBitmap2D(path_or_image):
obj = None
if isinstance(path_or_image, basestring):
try:
obj = _DTGDALBitmap2D(path_or_image)
except Exception, e:
print "Failed to create GDAL representation:", e
obj = None
if obj == None:
try:
obj = _DTPILBitmap2D(path_or_image)
except Exception, e:
print "Failed to create PIL representation:", e
obj = None
return obj
if __name__ == '__main__':
from datatank_py.DTDataFile import DTDataFile
with DTDataFile("/tmp/DTBitmap2D.dtbin", truncate=True) as df:
df["GDAL image"] = _DTGDALBitmap2D("examples/int16.tiff")
df["PIL image"] = _DTPILBitmap2D("/Library/Desktop Pictures/Art/Poppies Blooming.jpg")
# for v in df:
# print "%s = %s" % (v, df[v])
|
Python
| 0.000002 |
@@ -296,45 +296,8 @@
_()%0A
- print %22__init__%22, type(self)%0A
|
ed29f8c4bda7b8b4f9cf1f6281afcd14c7de0d2b
|
update it
|
lib/gtp/support/gtpv2c_tlv_gen.py
|
lib/gtp/support/gtpv2c_tlv_gen.py
|
#
# Copyright (c) 2017, CellWire Group
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
from docx import Document
import re, os, sys, string
import getopt
version = "0.1.0"
verbosity = 0
filename = ""
outdir = './'
FAIL = '\033[91m'
INFO = '\033[93m'
ENDC = '\033[0m'
def printDebug(string):
if verbosity > 0:
print string
def printInfo(string):
sys.stderr.write(INFO + string + ENDC + "\n")
def printFail(string):
sys.stderr.write(FAIL + string + ENDC + "\n")
sys.exit(0)
def usage():
print "Python adding prefix for asn1 v%s" % (version)
print "Usage: python asn1prefix.py [options]"
print "Available options:"
print "-d Enable script debug"
print "-f [file] Input file to parse"
print "-o [dir] Output files to given directory"
print "-h Print this help and return"
try:
opts, args = getopt.getopt(sys.argv[1:], "df:ho:", ["debug", "file", "help", "output"])
except getopt.GetoptError as err:
# print help information and exit:
usage()
sys.exit(2)
for o, a in opts:
if o in ("-d", "--debug"):
verbosity = 1
if o in ("-f", "--file"):
filename = a
if o in ("-o", "--output"):
outdir = a
if outdir.rfind('/') != len(outdir):
outdir += '/'
if o in ("-h", "--help"):
usage()
sys.exit(2)
if os.path.isfile(filename) and os.access(filename, os.R_OK):
in_file = open(filename, 'r')
else:
printFail("Cannot find file : " + filename)
document = Document(filename)
ie_table = ""
msg_table = ""
for table in document.tables:
cell = table.rows[0].cells[0]
if cell.text.find('IE Type value') != -1:
ie_table = table
printInfo("[Information Element Table]")
printInfo("...done");
if cell.text.find('Message Type value') != -1:
msg_table = table
printInfo("[Message Table]")
printInfo("...done");
printInfo("[Message Type]")
msg_list = {}
for row in msg_table.rows[1:]:
msg_list[row.cells[1].text] = { "type": row.cells[0].text }
printDebug(row.cells[1].text + " " + "type:" + row.cells[0].text)
printInfo("...done")
printInfo("[Information Element Type]")
ie_list = {}
for row in ie_table.rows[1:-5]:
if row.cells[1].text.find('Reserved') != -1:
continue
ie_list[row.cells[1].text] = { "type": row.cells[0].text }
printDebug(row.cells[1].text + " " + "type:" + row.cells[0].text)
printInfo("...done")
# Data will be a list of rows represented as dictionaries
# containing each row's data.
#data = []
#keys = None
#for i, row in enumerate(tlv_table.rows):
# text = (cell.text for cell in row.cells)
#
# # Establish the mapping based on the first row
# # headers; these will become the keys of our dictionary
# if i == 0:
# keys = tuple(text)
# continue
#
# # Construct a dictionary for this row, mapping
# # keys to values for this row
# row_data = dict(zip(keys, text))
# data.append(row_data)
#
#print data
|
Python
| 0 |
@@ -3249,43 +3249,352 @@
ows%5B
-1:
+2:-4
%5D:%0A
-msg_list%5Brow.cells%5B1%5D.text
+if len(row.cells%5B0%5D.text) == 0:%0A continue%0A if row.cells%5B0%5D.text.find('to') != -1:%0A continue%0A if row.cells%5B1%5D.text.find('Reserved') != -1:%0A continue%0A name = row.cells%5B1%5D.text%0A name = re.sub('%5Cn', '', name)%0A name = re.sub('%5C(%5B%5E%5C)%5D*%5C)*', '', name)%0A name = re.sub('%5Cs$', '', name)%0A msg_list%5Bname
%5D =
@@ -3638,33 +3638,20 @@
ntDebug(
-row.cells%5B1%5D.text
+name
+ %22 %22 +
@@ -3702,16 +3702,17 @@
done%22)%0A%0A
+#
printInf
@@ -3743,16 +3743,17 @@
Type%5D%22)%0A
+#
ie_list
@@ -3749,32 +3749,33 @@
)%0A#ie_list = %7B%7D%0A
+#
for row in ie_ta
@@ -3790,16 +3790,17 @@
%5B1:-5%5D:%0A
+#
if r
@@ -3832,32 +3832,33 @@
served') != -1:%0A
+#
continue
@@ -3854,24 +3854,25 @@
continue%0A
+#
ie_list%5B
@@ -3914,32 +3914,33 @@
cells%5B0%5D.text %7D%0A
+#
printDebug(r
@@ -3985,32 +3985,33 @@
.cells%5B0%5D.text)%0A
+#
printInfo(%22...do
|
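Decoded (%5B/%5D are square brackets, %5C a backslash, %5E a caret), the first two hunks rewrite the message-table loop to skip blank rows, range rows containing 'to', and Reserved rows, and to normalise each name before using it as a key; the remaining hunks only comment out the information-element loop:
for row in msg_table.rows[2:-4]:
    if len(row.cells[0].text) == 0:
        continue
    if row.cells[0].text.find('to') != -1:
        continue
    if row.cells[1].text.find('Reserved') != -1:
        continue
    name = row.cells[1].text
    name = re.sub('\n', '', name)           # strip embedded newlines
    name = re.sub('\([^\)]*\)*', '', name)  # drop parenthesised notes
    name = re.sub('\s$', '', name)          # trim a trailing space
    msg_list[name] = { "type": row.cells[0].text }
    printDebug(name + " " + "type:" + row.cells[0].text)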
1ad5d9a10c1744032a4e3b19438869f79dfa1d9b
|
Remove obsolete &rtp test
|
test/test_vim.py
|
test/test_vim.py
|
# -*- coding: utf-8 -*-
import os, tempfile
from nose.tools import with_setup, eq_ as eq, ok_ as ok
from common import vim, cleanup
def source(code):
fd, fname = tempfile.mkstemp()
with os.fdopen(fd,'w') as f:
f.write(code)
vim.command('source '+fname)
os.unlink(fname)
@with_setup(setup=cleanup)
def test_command():
fname = tempfile.mkstemp()[1]
vim.command('new')
vim.command('edit %s' % fname)
# skip the "press return" state, which does not handle deferred calls
vim.input('\r')
vim.command('normal itesting\npython\napi')
vim.command('w')
ok(os.path.isfile(fname))
eq(open(fname).read(), 'testing\npython\napi\n')
os.unlink(fname)
@with_setup
def test_command_output():
eq(vim.command_output('echo test'), 'test')
@with_setup(setup=cleanup)
def test_eval():
vim.command('let g:v1 = "a"')
vim.command('let g:v2 = [1, 2, {"v3": 3}]')
eq(vim.eval('g:'), {'v1': 'a', 'v2': [1, 2, {'v3': 3}]})
@with_setup(setup=cleanup)
def test_call():
eq(vim.funcs.join(['first', 'last'], ', '), 'first, last')
source("""
function! Testfun(a,b)
return string(a:a).":".a:b
endfunction
""")
eq(vim.funcs.Testfun(3, 'alpha'), '3:alpha')
@with_setup(setup=cleanup)
def test_strwidth():
eq(vim.strwidth('abc'), 3)
# 6 + (neovim)
# 19 * 2 (each japanese character occupies two cells)
eq(vim.strwidth('neovimのデザインかなりまともなのになってる。'), 44)
@with_setup(setup=cleanup)
def test_list_runtime_paths():
# Is this the default runtime path list?
homedir = os.path.join(os.environ['HOME'], '.nvim')
vimdir = vim.eval('$VIM')
dflt_rtp = [
homedir,
os.path.join(vimdir, 'vimfiles'),
vimdir,
os.path.join(vimdir, 'vimfiles', 'after')
]
# If the runtime is installed the default path
# is nvim/runtime
dflt_rtp2 = list(dflt_rtp)
dflt_rtp2[2] = os.path.join(dflt_rtp2[2], 'runtime')
rtp = vim.list_runtime_paths()
ok(rtp == dflt_rtp or rtp == dflt_rtp2)
@with_setup(setup=cleanup)
def test_chdir():
pwd = vim.eval('getcwd()')
vim.chdir('/')
eq(vim.eval('getcwd()'), '/')
vim.chdir(pwd)
eq(vim.eval('getcwd()'), pwd)
@with_setup(setup=cleanup)
def test_current_line():
eq(vim.current.line, '')
vim.current.line = 'abc'
eq(vim.current.line, 'abc')
@with_setup(setup=cleanup)
def test_vars():
vim.vars['python'] = [1, 2, {'3': 1}]
eq(vim.vars['python'], [1, 2, {'3': 1}])
eq(vim.eval('g:python'), [1, 2, {'3': 1}])
@with_setup(setup=cleanup)
def test_options():
eq(vim.options['listchars'], 'tab:> ,trail:-,nbsp:+')
vim.options['listchars'] = 'tab:xy'
eq(vim.options['listchars'], 'tab:xy')
@with_setup(setup=cleanup)
def test_buffers():
eq(len(vim.buffers), 1)
eq(vim.buffers[0], vim.current.buffer)
vim.command('new')
eq(len(vim.buffers), 2)
eq(vim.buffers[1], vim.current.buffer)
vim.current.buffer = vim.buffers[0]
eq(vim.buffers[0], vim.current.buffer)
@with_setup(setup=cleanup)
def test_windows():
eq(len(vim.windows), 1)
eq(vim.windows[0], vim.current.window)
vim.command('vsplit')
vim.command('split')
eq(len(vim.windows), 3)
eq(vim.windows[0], vim.current.window)
vim.current.window = vim.windows[1]
eq(vim.windows[1], vim.current.window)
@with_setup(setup=cleanup)
def test_tabpages():
eq(len(vim.tabpages), 1)
eq(vim.tabpages[0], vim.current.tabpage)
vim.command('tabnew')
eq(len(vim.tabpages), 2)
eq(len(vim.windows), 2)
eq(vim.windows[1], vim.current.window)
eq(vim.tabpages[1], vim.current.tabpage)
vim.current.window = vim.windows[0]
# Switching window also switches tabpages if necessary (this probably
# isn't the current behavior, but compatibility will be handled in the
# python client with an optional parameter)
eq(vim.tabpages[0], vim.current.tabpage)
eq(vim.windows[0], vim.current.window)
vim.current.tabpage = vim.tabpages[1]
eq(vim.tabpages[1], vim.current.tabpage)
eq(vim.windows[1], vim.current.window)
@with_setup(setup=cleanup)
def test_hash():
d = {}
d[vim.current.buffer] = "alpha"
eq(d[vim.current.buffer], "alpha")
vim.command('new')
d[vim.current.buffer] = "beta"
eq(d[vim.current.buffer], "beta")
vim.command('winc w')
eq(d[vim.current.buffer], "alpha")
vim.command('winc w')
eq(d[vim.current.buffer], "beta")
|
Python
| 0.000001 |
@@ -1462,589 +1462,8 @@
4)%0A%0A
-%0A@with_setup(setup=cleanup)%0Adef test_list_runtime_paths():%0A # Is this the default runtime path list?%0A homedir = os.path.join(os.environ%5B'HOME'%5D, '.nvim')%0A vimdir = vim.eval('$VIM')%0A dflt_rtp = %5B%0A homedir,%0A os.path.join(vimdir, 'vimfiles'),%0A vimdir,%0A os.path.join(vimdir, 'vimfiles', 'after')%0A %5D%0A # If the runtime is installed the default path%0A # is nvim/runtime%0A dflt_rtp2 = list(dflt_rtp)%0A dflt_rtp2%5B2%5D = os.path.join(dflt_rtp2%5B2%5D, 'runtime')%0A%0A rtp = vim.list_runtime_paths()%0A ok(rtp == dflt_rtp or rtp == dflt_rtp2)%0A%0A%0A
@wit
|
453bd97b7897da0f6fd6aea2517828962923d2f0
|
Fix test for python 3.
|
test/test_who.py
|
test/test_who.py
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import nose
from nose.tools import *
import unittest
from whoswho import who, config, utils
from nameparser.config.titles import TITLES as NAMEPARSER_TITLES
from nameparser.config.suffixes import SUFFIXES as NAMEPARSER_SUFFIXES
class TestFullNames(unittest.TestCase):
def setUp(self):
self.name = 'Robert Evan Liebowitz'
def test_string(self):
assert_true(who.match(self.name, 'Robert Liebowitz'.encode('utf-8')))
def test_unicode(self):
name = self.name
assert_true(who.match(name, 'attaché Robert Evan Liebowitz'))
assert_true(who.match(name, 'Rōbért Èvān Lîęböwitz'))
assert_false(who.match(name, 'Rōbért Èvān Lęîböwitz'))
def test_name_and_initials(self):
assert_true(who.match(self.name, 'R. Evan Liebowitz'))
assert_true(who.match(self.name, 'Robert E. Liebowitz'))
assert_true(who.match(self.name, 'R. E. Liebowitz'))
def test_different_number_initials(self):
assert_true(who.match(self.name, 'Robert Liebowitz'))
assert_true(who.match(self.name, 'R. Liebowitz'))
assert_false(who.match(self.name, 'Robert E. E. Liebowitz'))
assert_false(who.match(self.name, 'R. E. E. Liebowitz'))
assert_true(who.match('R.E.E. Liebowitz', 'R. E. E. Liebowitz'))
def test_different_initials(self):
assert_false(who.match(self.name, 'E. R. Liebowitz'))
assert_false(who.match(self.name, 'E. Liebowitz'))
assert_false(who.match(self.name, 'R. V. Liebowitz'))
assert_false(who.match(self.name, 'O. E. Liebowitz'))
def test_short_names(self):
assert_true(who.match(self.name, 'Rob Liebowitz'))
# TODO: Should these be true?
assert_false(who.match(self.name, 'Bert Liebowitz'))
assert_false(who.match(self.name, 'Robbie Liebowitz'))
def test_suffixes(self):
name = 'Robert Liebowitz Jr'
assert_true(who.match(name, 'Robert Liebowitz'))
assert_true(who.match(name, 'Robert Liebowitz Jr'))
assert_true(who.match(name, 'Robert Liebowitz, PhD'))
assert_false(who.match(name, 'Robert Liebowitz, Sr'))
assert_false(who.match(name, 'Robert Liebowitz, Sr, PhD'))
assert_true(who.match(name, 'Robert Liebowitz, Jr, PhD'))
def test_equivalent_suffixes(self):
name = 'Robert Liebowitz Jr'
assert_true(who.match(name, 'Robert Liebowitz Jnr'))
assert_false(who.match(name, 'Robert Liebowitz Snr'))
def test_titles(self):
name = 'Mr. Robert Liebowitz'
assert_true(who.match(name, 'Robert Liebowitz'))
assert_true(who.match(name, 'Sir Robert Liebowitz'))
assert_true(who.match(name, 'Dr. Robert Liebowitz'))
assert_false(who.match(name, 'Mrs. Robert Liebowitz'))
class TestConfig(unittest.TestCase):
def test_titles_all_defined(self):
"""
Check if list of titles is up to date with nameparser
"""
all_titles = (
config.MALE_TITLES |
config.FEMALE_TITLES |
config.GENDERLESS_TITLES
)
assert_equal(all_titles, NAMEPARSER_TITLES)
def test_suffixes_all_defined(self):
"""
Check if list of suffixes is up to date with nameparser
"""
all_suffixes = (
config.UNIQUE_SUFFIXES |
config.MISC_SUFFIXES
)
assert_equal(all_suffixes, NAMEPARSER_SUFFIXES)
class TestUtils(unittest.TestCase):
def test_equate_prefix_to_name(self):
assert_true(utils.equate_prefix('r', 'robert'))
assert_true(utils.equate_prefix('rob', 'robert'))
assert_false(utils.equate_prefix('robbie', 'robert'))
assert_false(utils.equate_prefix('bert', 'robert'))
def test_make_ascii(self):
assert_equal(
utils.make_ascii("foo bar .,?;'!@#$%^&*()"),
"foo bar .,?;'!@#$%^&*()"
)
assert_equal(
utils.make_ascii('äèîõù'),
'aeiou'
)
def test_strip_punctuation(self):
assert_equal(
utils.strip_punctuation('abcde aeiou'),
'abcde aeiou'
)
assert_equal(
utils.strip_punctuation("abcde.' aeiou"),
'abcde aeiou'
)
def test_equate_nickname(self):
assert_true(utils.equate_nickname('robert', 'rob'))
assert_true(utils.equate_nickname('robert', 'robby'))
assert_true(utils.equate_nickname('robert', 'robbie'))
assert_true(utils.equate_nickname('robbie', 'robby'))
assert_false(utils.equate_nickname('robert', 'robin'))
assert_false(utils.equate_nickname('harold', 'harriet'))
if __name__ == '__main__':
nose.main()
|
Python
| 0.000001 |
@@ -110,16 +110,27 @@
unittest
+%0Aimport sys
%0A%0Afrom w
@@ -427,32 +427,71 @@
t_string(self):%0A
+ # Only relevant for python 2.X%0A
assert_t
@@ -507,32 +507,36 @@
atch(self.name,
+str(
'Robert Liebowit
@@ -541,23 +541,8 @@
itz'
-.encode('utf-8'
)))%0A
|
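Decoded, the fix adds `import sys` at the top, marks the byte-string assertion as Python-2-only, and replaces `.encode('utf-8')` with `str(...)`, which still yields bytes under Python 2 (the module uses `unicode_literals`) but is a no-op on Python 3:
def test_string(self):
    # Only relevant for python 2.X
    assert_true(who.match(self.name, str('Robert Liebowitz')))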
d15c2107f4132b53fb77622748753bb9f3c2916f
|
Update messenger.py
|
bot/messenger.py
|
bot/messenger.py
|
# -*- coding: utf-8 -*-
import logging
import random
import sys
reload(sys)
sys.setdefaultencoding('utf8')
logger = logging.getLogger(__name__)
class Messenger(object):
def __init__(self, slack_clients):
self.clients = slack_clients
def send_message(self, channel_id, msg):
# in the case of Group and Private channels, RTM channel payload is a complex dictionary
if isinstance(channel_id, dict):
channel_id = channel_id['id']
logger.debug('Sending msg: %s to channel: %s' % (msg, channel_id))
channel = self.clients.rtm.server.channels.find(channel_id)
channel.send_message(msg)
def write_help_message(self, channel_id):
bot_uid = self.clients.bot_user_id()
txt = '{}\n{}\n{}\n'.format(
"Hi :wave:, who doesnt like a good quote ?",
"> `hi <@" + bot_uid + ">` - I'll respond with a randomized greeting mentioning you. :wave:",
"> `<@" + bot_uid + "> Quote` - I'll tell you one of my finest quotes"
)
self.send_message(channel_id, txt)
def write_greeting(self, channel_id, user_id):
greetings = ["Do you feel lucky ....", "Greetings ....","Winter is coming...", "Valar Morghulis...","Say hello to my little friend...","You talkin to me .."]
txt = '{} <@{}>!'.format(random.choice(greetings), user_id)
self.send_message(channel_id, txt)
def write_prompt(self, channel_id):
bot_uid = self.clients.bot_user_id()
txt = "Whoa ... spell it out for me.. please ? (e.g. `<@" + bot_uid + "> help`)"
self.send_message(channel_id, txt)
def write_quote(self, channel_id):
quotes=["To infinity…and beyond!","I have come here to chew bubblegum and kick ass, and Im all out of bubblegum.","Surely, you cant be serious – I am serious, and dont call me Shirley.","I pity the fool","There can be only juan","THIS IS SPARTA!!!!","Shit just got real","Its clobberin time!","Go ahead, make my day....","Run, Forrest, run!!!","Im too old for this shit..","Ill be back","SHOW ME THE MONEY!!!","Greed, for lack of a better word, is good..","You cant handle the truth!","Snap out of it!","I feel the need…the need for speed","Youre gonna need a bigger boat","I see dead people","Great scott!","Life is like a box of chocolates: you never know what youre gonna get","Im gonna make him an offer he cant refuse","They may take our lives, but theyll never take…OUR FREEDOM!","Oh, behave!","You had me at hello","Im not bad. Im just drawn that way","Ssssssssssssmokin","Ill have what shes having","Wax on, wax off. Wax on, wax off","Hakuna Matata","Im sorry,Sharpy...Im afraid I cant do that",":spock-hand::skin-tone-2: Live long and prosper :spock-hand::skin-tone-2:"]
txt = random.choice(quotes)
self.clients.send_user_typing_pause(channel_id)
self.send_message(channel_id, txt)
def write_quoteBB(self, channel_id):
quotesBB=["A guy opens his door and gets shot and you think that of me? No...I AM THE ONE WHO KNOCKS","Whats the point of being an outlaw when you got responsibilities?","Stay out of my territory","This is my own private domicile and I will not be harassed…bitch!"]
txt = random.choice(quotesBB)
self.clients.send_user_typing_pause(channel_id)
self.send_message(channel_id, txt)
def write_error(self, channel_id, err_msg):
txt = ":face_with_head_bandage: Houston, we have a problem :\n>```{}```".format(err_msg)
self.send_message(channel_id, txt)
|
Python
| 0.000001 |
@@ -1675,999 +1675,38 @@
=%5B%22T
-o infinity%E2%80%A6and beyond!%22,%22I have come here to chew bubblegum and kick ass, and Im all out of bubblegum.%22,%22Surely, you cant be serious %E2%80%93 I am serious, and dont call me Shirley.%22,%22I pity the fool%22,%22There can be only juan%22,%22THIS IS SPARTA!!!!%22,%22Shit just got real%22,%22Its clobberin time!%22,%22Go ahead, make my day....%22,%22Run, Forrest, run!!!%22,%22Im too old for this shit..%22,%22Ill be back%22,%22SHOW ME THE MONEY!!!%22,%22Greed, for lack of a better word, is good..%22,%22You cant handle the truth!%22,%22Snap out of it!%22,%22I feel the need%E2%80%A6the need for speed%22,%22Youre gonna need a bigger boat%22,%22I see dead people%22,%22Great scott!%22,%22Life is like a box of chocolates: you never know what youre gonna get%22,%22Im gonna make him an offer he cant refuse%22,%22They may take our lives, but theyll never take%E2%80%A6OUR FREEDOM!%22,%22Oh, behave!%22,%22You had me at hello%22,%22Im not bad. Im just drawn that way%22,%22Ssssssssssssmokin%22,%22Ill have what shes having%22,%22Wax on, wax off. Wax on, wax off%22,%22Hakuna Matata%22,%22Im sorry,Sharpy...Im afraid I cant do that
+here can be only @juantwothree
%22,%22:
|
9e2948e7725da996399b7f80fe9c53e2c4a0c848
|
Correct typo in docs.
|
django/applications/catmaid/control/project.py
|
django/applications/catmaid/control/project.py
|
import json
from collections import defaultdict
from django.contrib import auth
from django.db import connection
from django.http import HttpResponse
from django.shortcuts import get_object_or_404
from catmaid.models import *
from catmaid.control.authentication import *
from catmaid.control.common import *
from guardian.shortcuts import get_objects_for_user
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def list_project_tags(request, project_id=None):
""" Return the tags associated with the project.
"""
p = get_object_or_404(Project, pk=project_id)
tags = [ str(t) for t in p.tags.all()]
result = {'tags':tags}
return HttpResponse(json.dumps(result, sort_keys=True, indent=4), mimetype="text/json")
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def update_project_tags(request, project_id=None, tags=None):
""" Updates the given project with the supplied tags. All
existing tags will be replaced.
"""
p = get_object_or_404(Project, pk=project_id)
    # Create list of single stripped tags
if tags is None:
tags = []
else:
tags = tags.split(",")
tags = [t.strip() for t in tags]
# Add tags to the model
p.tags.set(*tags)
# Return an empty closing response
return HttpResponse(json.dumps(""), mimetype="text/json")
class ExProject:
""" A wrapper around the Project model to include additional
properties.
"""
def __init__(self, project, is_editable, is_catalogueable):
self.project = project
self.is_editable = is_editable
self.is_catalogueable = is_catalogueable
def __getattr__(self, attr):
""" Return own property when available, otherwise proxy
to project.
"""
if attr in self.__dict__:
return getattr(self,attr)
return getattr(self.project, attr)
def extend_projects(user, projects):
""" Adds the properties is_editable and is_catalogueable to all
projects passed.
"""
# Create sets of projects that are administrable and annotatable
# by the current user and unify them to one set. This will only
# work for authenticated users (i.e. not AnonymousUser)
administrable_projects = set(get_objects_for_user(user, 'can_administer', Project))
annotatable_projects = set(get_objects_for_user(user, 'can_annotate', Project))
administrable_projects.union(annotatable_projects)
# Just for readability, have another reference to the union
editable_projects = administrable_projects
# Find all the projects that are editable:
catalogueable_projects = set(x.project.id for x in \
Class.objects.filter(class_name='driver_line').select_related('project'))
result = []
for p in projects:
ex_p = ExProject(p,
user.is_superuser or p in editable_projects,
p.id in catalogueable_projects)
result.append(ex_p)
return result
def get_project_qs_for_user(user):
""" Returns the query set of projects that are adminiserable and
browsable by the given user.
"""
perms=['can_administer', 'can_annotate', 'can_browse']
return get_objects_for_user(user, perms, Project, any_perm=True)
def projects(request):
# This is somewhat ridiculous - four queries where one could be
# used in raw SQL. The problem here is chiefly that
# 'select_related' in Django doesn't work through
# ManyToManyFields. Development versions of Django have
# introduced prefetch_related, but this isn't in the stable
# version that I'm using. (Another way around this would be to
# query on ProjectStack, but the legacy CATMAID schema doesn't
# include a single-column primary key for that table.)
stacks = dict((x.id, x) for x in Stack.objects.all())
# Create a dictionary that maps from projects to stacks:
c = connection.cursor() #@UndefinedVariable
c.execute("SELECT project_id, stack_id FROM project_stack")
project_to_stacks = defaultdict(list)
for project_id, stack_id in c.fetchall():
project_to_stacks[project_id].append(stacks[stack_id])
    # Get all projects that are visible for the current user
projects = get_project_qs_for_user(request.user).order_by('title')
# Extend projects with extra editable and catalogueable info
projects = extend_projects(request.user, projects)
# Create a dictionary with those results that we can output as JSON:
result = []
for p in projects:
if p.id not in project_to_stacks:
continue
stacks_dict = {}
for s in project_to_stacks[p.id]:
stacks_dict[s.id] = {
'title': s.title,
'comment': s.comment,
'note': '',
'action': 'javascript:openProjectStack(%d,%d)' % (p.id, s.id)}
editable = p.is_editable
result.append( {
'pid': p.id,
'title': p.title,
'public_project': int(p.public),
'editable': int(p.is_editable),
'catalogue': int(p.is_catalogueable),
'note': '[ editable ]' if p.is_editable else '',
'action': stacks_dict} )
return HttpResponse(json.dumps(result, sort_keys=True, indent=4), mimetype="text/json")
|
Python
| 0.000001 |
@@ -3031,17 +3031,17 @@
adminis
-e
+t
rable an
|
dd9843c97c9e15c2522034a6f5333f68714cd031
|
copy with original content type
|
filer/storage.py
|
filer/storage.py
|
#-*- coding: utf-8 -*-
import urllib.request, urllib.parse, urllib.error
from django.core.files.storage import FileSystemStorage
from django.utils.encoding import smart_str
try:
from storages.backends.s3boto import S3BotoStorage
except ImportError:
from storages.backends.s3boto3 import S3Boto3Storage as S3BotoStorage
class PublicFileSystemStorage(FileSystemStorage):
"""
File system storage that saves its files in the filer public directory
See ``filer.settings`` for the defaults for ``location`` and ``base_url``.
"""
is_secure = False
class PrivateFileSystemStorage(FileSystemStorage):
"""
File system storage that saves its files in the filer private directory.
This directory should NOT be served directly by the web server.
See ``filer.settings`` for the defaults for ``location`` and ``base_url``.
"""
is_secure = True
def filepath_to_url(path):
if path is None:
return path
return urllib.parse.quote(smart_str(path).replace("\\", "/"), safe="/~!*()")
class PatchedS3BotoStorage(S3BotoStorage):
def url(self, name):
if self.custom_domain:
name = filepath_to_url(self._normalize_name(self._clean_name(name)))
return "%s://%s/%s" % ('https' if self.secure_urls else 'http',
self.custom_domain, name)
return self.connection.generate_url(
self.querystring_expire,
method='GET', bucket=self.bucket.name, key=self._encode_name(name),
query_auth=self.querystring_auth, force_http=not self.secure_urls)
def has_public_read(self, path):
old_acl = self.bucket.Object(path).Acl().grants
if not old_acl:
return False
for right in old_acl:
if (
'AllUsers' in right.get('Grantee', {}).get('URI', '') and
right.get('Permission', '').upper() == 'READ'
):
return True
return False
def copy(self, src_name, dst_name):
src_path = self._normalize_name(self._clean_name(src_name))
dst_path = self._normalize_name(self._clean_name(dst_name))
copy_source = {
'Bucket': self.bucket.name,
'Key': src_path
}
extra_args = {}
# we cannot preserve acl in boto3, but we can give public read
if self.has_public_read(src_path):
extra_args = {
'ACL': 'public-read'
}
self.bucket.copy(copy_source, dst_path, extra_args)
|
Python
| 0 |
@@ -1625,20 +1625,26 @@
d(self,
-path
+object_key
):%0A
@@ -1660,32 +1660,18 @@
l =
-self.bucket.Object(path)
+object_key
.Acl
@@ -2350,16 +2350,66 @@
ic read%0A
+ source_obj = self.bucket.Object(src_path)%0A
@@ -2433,23 +2433,25 @@
c_read(s
-rc_path
+ource_obj
):%0A
@@ -2508,16 +2508,72 @@
ic-read'
+,%0A 'ContentType': source_obj.content_type
%0A
|
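The change above forwards the source object's Content-Type explicitly, since a boto3 managed copy cannot carry over the source ACL and does not always carry the original metadata (in particular on the multipart copy path). A minimal sketch of the same pattern outside the storage class; the bucket and key names are illustrative:

import boto3

s3 = boto3.resource('s3')
bucket = s3.Bucket('example-bucket')  # hypothetical bucket

src_key = 'uploads/report.pdf'
dst_key = 'archive/report.pdf'

source_obj = bucket.Object(src_key)
extra_args = {
    # boto3 cannot copy the source ACL, so grant public read explicitly
    'ACL': 'public-read',
    # forward the original Content-Type so the copy is served the same way
    'ContentType': source_obj.content_type,
}
bucket.copy({'Bucket': bucket.name, 'Key': src_key}, dst_key, extra_args)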
19b77442ee3cc80d8c7eaee6bde6c87d6a9e9277
|
Test a fix for the wheel test
|
tests/integration/modules/saltutil.py
|
tests/integration/modules/saltutil.py
|
# -*- coding: utf-8 -*-
'''
Integration tests for the saltutil module.
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing libs
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
import integration
class SaltUtilModuleTest(integration.ModuleCase):
'''
Testcase for the saltutil execution module
'''
# Tests for the wheel function
def test_wheel_just_function(self):
'''
Tests using the saltutil.wheel function when passing only a function.
'''
ret = self.run_function('saltutil.wheel', ['minions.connected'])
self.assertIn('minion', ret['return'])
self.assertIn('sub_minion', ret['return'])
def test_wheel_with_arg(self):
'''
Tests using the saltutil.wheel function when passing a function and an arg.
'''
ret = self.run_function('saltutil.wheel', ['key.list', 'minion'])
self.assertEqual(ret['return'], {})
def test_wheel_no_arg_raise_error(self):
'''
Tests using the saltutil.wheel function when passing a function that requires
an arg, but one isn't supplied.
'''
self.assertRaises(TypeError, 'saltutil.wheel', ['key.list'])
def test_wheel_with_kwarg(self):
'''
Tests using the saltutil.wheel function when passing a function and a kwarg.
This function just generates a key pair, but doesn't do anything with it. We
just need this for testing purposes.
'''
ret = self.run_function('saltutil.wheel', ['key.gen'], keysize=1024)
self.assertIn('pub', ret['return'])
self.assertIn('priv', ret['return'])
if __name__ == '__main__':
from integration import run_tests
run_tests(SaltUtilModuleTest)
|
Python
| 0 |
@@ -128,16 +128,28 @@
e_import
+%0Aimport time
%0A%0A# Impo
@@ -403,16 +403,92 @@
'''%0A%0A
+ def setUp(self):%0A self.run_function('saltutil.refresh_pillar') %0A%0A
# Te
@@ -645,16 +645,16 @@
nction.%0A
-
@@ -649,32 +649,136 @@
on.%0A '''%0A
+ # Wait for the pillar refresh to kick in, so that grains are ready to go%0A time.sleep(3) %0A
ret = se
|
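The setUp/sleep pair introduced above gives the asynchronous pillar refresh time to land before the wheel calls run. A fixed sleep is the simplest form of that idea; a slightly more robust variant (not what the diff does, just a common alternative) polls until a condition holds or a deadline passes:

import time

def wait_for(condition, timeout=30, interval=0.5):
    """Poll condition() until it returns truthy or timeout seconds elapse."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        if condition():
            return True
        time.sleep(interval)
    return False

# usage sketch, with a hypothetical readiness check:
# wait_for(lambda: self.run_function('pillar.items'), timeout=30)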
40d2de6f25a4081dac3d809c9d0b8d20478cf92c
|
Tidy test settings and introduce django-nose
|
wagtailmenus/tests/settings.py
|
wagtailmenus/tests/settings.py
|
import os
import hashlib
from django.conf.global_settings import * # NOQA
DEBUG = True
SITE_ID = 1
DATABASES = {
'default': {
'NAME': 'wagtailmenus.sqlite',
'TEST_NAME': 'wagtailmenus_test.sqlite',
'ENGINE': 'django.db.backends.sqlite3',
}
}
TIME_ZONE = 'Europe/London'
USE_TZ = True
USE_I18N = True
USE_L10N = True
INSTALLED_APPS = (
'wagtailmenus.tests',
'wagtailmenus',
'wagtail.wagtailforms',
'wagtail.wagtailsearch',
'wagtail.wagtailembeds',
'wagtail.wagtailimages',
'wagtail.wagtailsites',
'wagtail.wagtailusers',
'wagtail.wagtailsnippets',
'wagtail.wagtaildocs',
'wagtail.wagtailredirects',
'wagtail.wagtailadmin',
'wagtail.api',
'wagtail.wagtailcore',
'wagtailmodeladmin',
'taggit',
'modelcluster',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.sites'
)
PROJECT_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
STATIC_ROOT = os.path.join(PROJECT_ROOT, 'test-static')
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(PROJECT_ROOT, 'test-media')
MEDIA_URL = '/media/'
SECRET_KEY = 'fake-key'
ROOT_URLCONF = 'wagtailmenus.tests.urls'
LOGIN_URL = 'wagtailadmin_login'
LOGIN_REDIRECT_URL = 'wagtailadmin_home'
# =============================================================================
# Templates
# =============================================================================
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(PROJECT_ROOT, 'templates'),
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'wagtail.contrib.settings.context_processors.settings',
],
},
},
]
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'wagtail.wagtailcore.middleware.SiteMiddleware',
'wagtail.wagtailredirects.middleware.RedirectMiddleware',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
'compressor.finders.CompressorFinder',
)
|
Python
| 0 |
@@ -155,16 +155,21 @@
ailmenus
+_test
.sqlite'
@@ -201,32 +201,37 @@
agtailmenus_test
+_test
.sqlite',%0A
@@ -379,14 +379,8 @@
= (%0A
- %0A%0A
@@ -779,24 +779,43 @@
odeladmin',%0A
+ 'django_nose',%0A
'taggit'
@@ -1355,16 +1355,48 @@
.urls'%0A%0A
+WAGTAIL_SITE_NAME = 'Test site'%0A
LOGIN_UR
@@ -1420,16 +1420,16 @@
_login'%0A
-
LOGIN_RE
@@ -1462,16 +1462,323 @@
_home'%0A%0A
+%0A# =============================================================================%0A# django-nose config%0A# =============================================================================%0A%0ATEST_RUNNER = 'django_nose.NoseTestSuiteRunner'%0ANOSE_ARGS = %5B%0A '--with-coverage',%0A '--cover-package=wagtailmenus',%0A%5D%0A%0A
# ======
|
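With TEST_RUNNER pointed at django_nose.NoseTestSuiteRunner and NOSE_ARGS requesting coverage, the ordinary Django test entry point picks nose up automatically. A sketch of invoking it programmatically; the settings-module path is assumed from the ROOT_URLCONF shown in this record:

import os
import django
from django.conf import settings
from django.test.utils import get_runner

os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'wagtailmenus.tests.settings')
django.setup()

TestRunner = get_runner(settings)  # resolves to django_nose.NoseTestSuiteRunner
failures = TestRunner().run_tests(['wagtailmenus'])
raise SystemExit(bool(failures))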
b359d71a3c13720257167c57959a18b7a5ce9d07
|
Fix Python package version checking
|
gravity/tilt/tilt_tests.py
|
gravity/tilt/tilt_tests.py
|
import os, subprocess, sys
import pkg_resources
from packaging import version
# This function is used to check if an apt package is installed on Raspbian, Ubuntu, Debian, etc.
def apt_package_installed(package_name: str) -> bool:
devnull = open(os.devnull,"w")
retval = subprocess.call(["dpkg", "-s", package_name],stdout=devnull,stderr=subprocess.STDOUT)
devnull.close()
if retval != 0:
return False
return True
# This is just a means to check if apt (dpkg) is installed at all
def has_apt() -> bool:
try:
devnull = open(os.devnull,"w")
retval = subprocess.call(["dpkg", "--version"],stdout=devnull,stderr=subprocess.STDOUT)
devnull.close()
if retval != 0:
return False
return True
except:
# dpkg doesn't exist
return False
def check_apt_packages() -> (bool, list):
package_list = ["bluez", "libcap2-bin", "libbluetooth3", "libbluetooth-dev", "redis-server", "python3-dev"]
test_results = []
all_packages_ok = True
for package in package_list:
result = {'package': package, 'result': True}
if apt_package_installed(package):
result['result'] = True
else:
result ['result'] = False
all_packages_ok = False
test_results.append(result)
return all_packages_ok, test_results
def check_python_packages() -> (bool, list):
if sys.platform == "darwin":
# The MacOS support uses different packages from the support for Linux
package_list = [
{'name': 'PyObjc', 'version': version.parse("6.2")},
{'name': 'redis', 'version': version.parse("3.4.1")},
]
else:
package_list = [
{'name': 'PyBluez', 'version': version.parse("0.23")},
{'name': 'aioblescan', 'version': version.parse("0.2.6")},
{'name': 'redis', 'version': version.parse("3.4.1")},
]
test_results = []
all_packages_ok = True
for package_to_find in package_list:
result_stub = {
'package': package_to_find['name'],
'required_version': package_to_find['version'],
'installed_version': None,
'ok': False,
}
for package in pkg_resources.working_set:
if package.project_name == package_to_find['name']:
result_stub['installed_version'] = package.parsed_version
if result_stub['installed_version'] == result_stub['required_version']:
result_stub['ok'] = True
if result_stub['ok'] is False:
all_packages_ok = False
test_results.append(result_stub)
return all_packages_ok, test_results
# The following was used for testing during development
if __name__ == "__main__":
if has_apt():
apt_ok, apt_test_results = check_apt_packages()
if apt_ok:
print("All apt packages found. Package status:")
else:
print("Missing apt packages. Package status:")
for this_test in apt_test_results:
print("Package {}: {}".format(this_test['package'],
("Installed" if this_test['result'] else "Not Installed")))
else:
print("dpkg not installed - not checking to see if system packages are installed")
print("")
# Next, check the python packages
python_ok, python_test_results = check_python_packages()
if python_ok:
print("All required python packages found. Package status:")
else:
print("Missing/incorrect python packages. Package status:")
for this_test in python_test_results:
print("Package {} - Required Version {} - Installed Version {} - OK? {}".format(
this_test['package'], this_test['required_version'], this_test['installed_version'], this_test['ok']))
print("")
|
Python
| 0.000026 |
@@ -1575,11 +1575,11 @@
': '
-PyO
+pyo
bjc'
@@ -2477,16 +2477,23 @@
ersion'%5D
+.public
== resu
@@ -2519,16 +2519,23 @@
ersion'%5D
+.public
:%0A
|
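The .public comparison fixes exact-equality checks that break when an installed package carries a PEP 440 local version segment (for example a suffix added by a local build); .public strips that segment before comparing. A small illustration:

from packaging import version

required = version.parse('0.23')
installed = version.parse('0.23+deadbeef')  # hypothetical local build

print(installed == required)                # False: local segment differs
print(installed.public == required.public)  # True: both are '0.23'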
4338b097f97bb03be27c81a810a5fc652f842c8a
|
change cnab processor selection to a method
|
l10n_br_account_payment_brcobranca/models/account_payment_mode.py
|
l10n_br_account_payment_brcobranca/models/account_payment_mode.py
|
# Copyright (C) 2012-Today - KMEE (<http://kmee.com.br>).
# @author Luis Felipe Miléo - [email protected]
# @author Renato Lima - [email protected]
# Copyright (C) 2021-Today - Akretion (<http://www.akretion.com>).
# @author Magno Costa <[email protected]>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import fields, models
class AccountPaymentMode(models.Model):
"""
Override Account Payment Mode
"""
_inherit = "account.payment.mode"
cnab_processor = fields.Selection(
selection_add=[("brcobranca", "BRCobrança")],
)
|
Python
| 0 |
@@ -354,22 +354,19 @@
import
-fields
+api
, models
@@ -506,42 +506,115 @@
-cnab_processor = fields.Selection(
[email protected]%0A def _selection_cnab_processor(self):%0A selection = super()._selection_cnab_processor()
%0A
@@ -631,14 +631,16 @@
tion
-_add=%5B
+.append(
(%22br
@@ -667,13 +667,31 @@
%C3%A7a%22)
-%5D,%0A )
+)%0A return selection
%0A
|
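The diff swaps a static selection_add for an overridable hook: each module calls super() to fetch the accumulated selection and appends its own entry, so several addons can extend the list without clobbering each other. The resulting method, reassembled from the diff:

from odoo import api, models

class AccountPaymentMode(models.Model):
    _inherit = "account.payment.mode"

    @api.model
    def _selection_cnab_processor(self):
        # extend the parent selection instead of redefining the field
        selection = super()._selection_cnab_processor()
        selection.append(("brcobranca", "BRCobrança"))
        return selection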
3542dccc5e6a9718716695bd041ad302f2576a4d
|
Use uuid() to generate unique XQueue submission IDs during import of submissions
|
controller/management/commands/import_graded_essays.py
|
controller/management/commands/import_graded_essays.py
|
from django.core.management.base import BaseCommand
from django.conf import settings
from django.utils import timezone
#from http://jamesmckay.net/2009/03/django-custom-managepy-commands-not-committing-transactions/
#Fix issue where db data in manage.py commands is not refreshed at all once they start running
from django.db import transaction
transaction.commit_unless_managed()
import requests
import urlparse
import time
import json
import logging
import sys
from ConfigParser import SafeConfigParser
from datetime import datetime
import test_util
from controller.models import Submission, Grader
from controller.models import GraderStatus, SubmissionState
import controller.rubric_functions
import random
from controller import grader_util
log = logging.getLogger(__name__)
class Command(BaseCommand):
args = "<filename>"
help = "Poll grading controller and send items to be graded to ml"
def handle(self, *args, **options):
"""
Read from file
"""
parser = SafeConfigParser()
parser.read(args[0])
print("Starting import...")
print("Reading config from file {0}".format(args[0]))
header_name = "importdata"
location = parser.get(header_name, 'location')
course_id = parser.get(header_name, 'course_id')
problem_id = parser.get(header_name, 'problem_id')
prompt_file = parser.get(header_name, 'prompt_file')
essay_file = parser.get(header_name, 'essay_file')
essay_limit = int(parser.get(header_name, 'essay_limit'))
state = parser.get(header_name, "state")
next_grader_type = parser.get(header_name, "next_grader")
add_grader = parser.get(header_name, "add_grader_object") == "True"
set_as_calibration = parser.get(header_name, "set_as_calibration") == "True"
max_score= parser.get(header_name,"max_score")
student_id = parser.get(header_name,'student_id')
increment_ids = parser.get(header_name,'increment_ids')
rubric_file = parser.get(header_name, 'rubric_file')
import_rubric_scores = parser.get(header_name, 'import_rubric_scores') == "True"
rubric_scores_file = parser.get(header_name, 'rubric_scores_file')
rubric=open(settings.REPO_PATH / rubric_file).read()
prompt=open(settings.REPO_PATH / prompt_file).read()
score, text = [], []
combined_raw = open(settings.REPO_PATH / essay_file).read()
raw_lines = combined_raw.splitlines()
for row in xrange(1, len(raw_lines)):
score1, text1 = raw_lines[row].strip().split("\t")
text.append(text1)
score.append(int(score1))
if increment_ids:
student_id = int(student_id)
if import_rubric_scores:
rubric_scores=[]
combined_raw = open(settings.REPO_PATH / rubric_scores_file).read()
raw_lines = combined_raw.splitlines()
for row in xrange(1, len(raw_lines)):
rubric_score_row=[]
for score_item in raw_lines[row].strip().split("\t"):
rubric_score_row.append(int(score_item))
rubric_scores.append(rubric_score_row)
for i in range(0, min(essay_limit, len(text))):
sub = Submission(
prompt=prompt,
student_id=student_id,
problem_id=problem_id,
state=state,
student_response=text[i],
student_submission_time=timezone.now(),
xqueue_submission_id=test_util.generate_new_xqueue_id(),
xqueue_submission_key="",
xqueue_queue_name="",
location=location,
course_id=course_id,
next_grader_type=next_grader_type,
posted_results_back_to_queue=True,
previous_grader_type="BC",
max_score=max_score,
rubric=rubric,
preferred_grader_type = next_grader_type,
)
sub.save()
if add_grader:
sub.previous_grader_type="IN"
sub.save()
grade = Grader(
score=score[i],
feedback="",
status_code=GraderStatus.success,
grader_id="",
grader_type="IN",
confidence=1,
is_calibration=set_as_calibration,
)
grade.submission = sub
grade.save()
success, rubric_targets=controller.rubric_functions.generate_targets_from_rubric(sub.rubric)
scores=[]
for z in xrange(0,len(rubric_targets)):
scores.append(random.randint(0,rubric_targets[z]))
if import_rubric_scores:
score_item = rubric_scores[i]
if len(score_item) == len(scores):
scores = score_item
log.debug("Score: {0} Rubric Score: {1}".format(score[i], scores))
controller.rubric_functions.generate_rubric_object(grade, scores, sub.rubric)
if increment_ids:
student_id+=1
print ("Successfully imported {0} essays using configuration in file {1}.".format(
min(essay_limit, len(text)),
args[0],
))
|
Python
| 0 |
@@ -458,16 +458,39 @@
ort sys%0A
+from uuid import uuid4%0A
from Con
@@ -556,25 +556,8 @@
time
-%0Aimport test_util
%0A%0Afr
@@ -3574,42 +3574,19 @@
_id=
-test_util.generate_new_xqueue_
+uu
id
+4
()
+.hex
,%0A
|
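uuid4().hex yields a 32-character hexadecimal string that is unique for all practical purposes, which is all the import script needs for placeholder XQueue submission ids:

from uuid import uuid4

submission_id = uuid4().hex
print(submission_id)       # e.g. '3f2b8c0e9a7d4e21b6f0c5a1d8e47b90'
print(len(submission_id))  # always 32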
0bbd10058ff58ca5160e74374c0b34f99c429ad8
|
Update docstrings
|
openpathsampling/high_level/part_in_b_tps.py
|
openpathsampling/high_level/part_in_b_tps.py
|
from openpathsampling.high_level.network import FixedLengthTPSNetwork
from openpathsampling.high_level.transition import FixedLengthTPSTransition
import openpathsampling as paths
class PartInBFixedLengthTPSTransition(FixedLengthTPSTransition):
"""Fixed length TPS transition accepting any frame in the final state.
Implements the ensemble in [1]_. Details in :class:`.PartInBNetwork`.
See also
--------
PartInBNetwork
References
----------
.. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
def _tps_ensemble(self, stateA, stateB):
return paths.SequentialEnsemble([
paths.LengthEnsemble(1) & paths.AllInXEnsemble(stateA),
paths.LengthEnsemble(self.length - 1) \
& paths.PartInXEnsemble(stateB)
])
class PartInBFixedLengthTPSNetwork(FixedLengthTPSNetwork):
"""Network for fixed-length TPS accepting any frame in the final state
This network samples a single path ensemble where the paths must begin
in an initial state, run for a fixed total number of frames, and must
have at least one frame in a final state. This was used to assist in
the flux part of the rate calculation in Ref. [1]_. This version is
generalized to multiple states.
Parameters
----------
    initial_states : (list of) :class:`.Volume`
acceptable initial states
final_states : (list of) :class:`.Volume`
acceptable final states
length : int
length of paths in the path ensemble, in frames
allow_self_transitions : bool
whether self-transitions (A->A) are allowed; default is False. For
this network, A->B->A transitions are *always* allowed.
References
----------
.. [1] C. Dellago, P.G. Bolhuis, and D. Chandler. J. Chem. Phys. 110,
6617 (1999). http://dx.doi.org/10.1063/1.478569
"""
TransitionType = PartInBFixedLengthTPSTransition
|
Python
| 0.000001 |
@@ -322,40 +322,112 @@
-Implements the ensemble in
+Transition that builds an ensemble used to facilitate the rate%0A calculation in fixed-length TPS.
%5B1%5D_
-.
Det
@@ -433,16 +433,20 @@
tails in
+%0A
:class:
@@ -450,24 +450,38 @@
ss:%60.PartInB
+FixedLengthTPS
Network%60.%0A%0A
@@ -501,24 +501,24 @@
--------%0A
-
PartInBN
@@ -516,16 +516,30 @@
PartInB
+FixedLengthTPS
Network%0A
@@ -1357,24 +1357,28 @@
part of the
+TPS
rate calcula
@@ -1385,22 +1385,14 @@
tion
- in Ref
. %5B1%5D_
-.
Thi
|
73a9c5f02482abe376ce518f6fcf5e42f322952c
|
Disable open file verification for INSERT cancel tests due to IMPALA-551
|
tests/query_test/test_cancellation.py
|
tests/query_test/test_cancellation.py
|
#!/usr/bin/env python
# Copyright (c) 2012 Cloudera, Inc. All rights reserved.
# Tests query cancellation using the ImpalaService.Cancel API
#
import pytest
import threading
from time import sleep
from tests.beeswax.impala_beeswax import ImpalaBeeswaxException
from tests.common.test_vector import TestDimension
from tests.common.impala_test_suite import ImpalaTestSuite
from tests.util.test_file_parser import QueryTestSectionReader
from tests.verifiers.metric_verifier import MetricVerifier
# Queries to execute. Use the TPC-H dataset because tables are large so queries take some
# time to execute.
QUERIES = ['select l_returnflag from lineitem',
'select count(l_returnflag) from lineitem',
'select * from lineitem limit 50',
]
QUERY_TYPE = ["SELECT", "CTAS"]
# Time to sleep between issuing query and canceling
CANCEL_DELAY_IN_SECONDS = [0, 1, 2, 3, 4]
# Number of times to execute/cancel each query under test
NUM_CANCELATION_ITERATIONS = 1
# Test cancellation on both running and hung queries
DEBUG_ACTIONS = [None, 'WAIT']
class TestCancellation(ImpalaTestSuite):
@classmethod
def get_workload(self):
return 'tpch'
@classmethod
def add_test_dimensions(cls):
super(TestCancellation, cls).add_test_dimensions()
cls.TestMatrix.add_dimension(TestDimension('query', *QUERIES))
cls.TestMatrix.add_dimension(TestDimension('query_type', *QUERY_TYPE))
cls.TestMatrix.add_dimension(TestDimension('cancel_delay', *CANCEL_DELAY_IN_SECONDS))
cls.TestMatrix.add_dimension(TestDimension('action', *DEBUG_ACTIONS))
cls.TestMatrix.add_constraint(lambda v: v.get_value('query_type') != 'CTAS' or (\
v.get_value('table_format').file_format in ['text', 'parquet'] and\
v.get_value('table_format').compression_codec == 'none'))
cls.TestMatrix.add_constraint(lambda v: v.get_value('exec_option')['batch_size'] == 0)
if cls.exploration_strategy() != 'core':
NUM_CANCELATION_ITERATIONS = 3
def cleanup_test_table(self, table_format):
self.execute_query("invalidate metadata")
self.execute_query("drop table if exists ctas_cancel", table_format=table_format)
def execute_cancel_test(self, vector):
query = vector.get_value('query')
query_type = vector.get_value('query_type')
if query_type == "CTAS":
self.cleanup_test_table(vector.get_value('table_format'))
query = "create table ctas_cancel stored as %sfile as %s" %\
(vector.get_value('table_format').file_format, query)
action = vector.get_value('action')
# node ID 0 is the scan node
debug_action = '0:GETNEXT:' + action if action != None else ''
vector.get_value('exec_option')['debug_action'] = debug_action
# Execute the query multiple times, each time canceling it
for i in xrange(NUM_CANCELATION_ITERATIONS):
handle = self.execute_query_async(query, vector.get_value('exec_option'),
table_format=vector.get_value('table_format'))
def fetch_results():
threading.current_thread().fetch_results_error = None
try:
new_client = self.create_impala_client()
new_client.fetch_results(query,handle)
except Exception as e:
# We expect the RPC to fail only when the query is cancelled.
if not (type(e) is ImpalaBeeswaxException and "Cancelled" in str(e)):
threading.current_thread().fetch_results_error = e
finally:
new_client.close_connection()
thread = threading.Thread(target=fetch_results)
thread.start()
sleep(vector.get_value('cancel_delay'))
assert self.client.get_state(handle) != self.client.query_states['EXCEPTION']
cancel_result = self.client.cancel_query(handle)
assert cancel_result.status_code == 0,\
'Unexpected status code from cancel request: %s' % cancel_result
thread.join()
if thread.fetch_results_error is not None:
raise thread.fetch_results_error
if query_type == "CTAS":
self.cleanup_test_table(vector.get_value('table_format'))
# TODO: Add some additional verification to check to make sure the query was
# actually canceled
# Executing the same query without canceling should work fine. Only do this if the
# query has a limit or aggregation
if action is None and ('count' in query or 'limit' in query):
self.execute_query(query, vector.get_value('exec_option'))
def teardown_method(self, method):
# For some reason it takes a little while for the query to get completely torn down
# when the debug action is WAIT, causing TestValidateMetrics.test_metrics_are_zero to
# fail. Introducing a small delay allows everything to quiesce.
# TODO: Figure out a better way to address this
sleep(1)
class TestCancellationParallel(TestCancellation):
@classmethod
def add_test_dimensions(cls):
super(TestCancellationParallel, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(lambda v: v.get_value('query_type') != 'CTAS')
def test_cancel_select(self, vector):
self.execute_cancel_test(vector)
class TestCancellationSerial(TestCancellation):
@classmethod
def add_test_dimensions(cls):
super(TestCancellationSerial, cls).add_test_dimensions()
cls.TestMatrix.add_constraint(lambda v: v.get_value('query_type') == 'CTAS')
cls.TestMatrix.add_constraint(lambda v: v.get_value('cancel_delay') != 0)
cls.TestMatrix.add_constraint(lambda v: v.get_value('action') is None)
@pytest.mark.execute_serially
def test_cancel_insert(self, vector):
self.execute_cancel_test(vector)
metric_verifier = MetricVerifier(self.impalad_test_service)
metric_verifier.verify_no_open_files(timeout=10)
|
Python
| 0 |
@@ -5693,16 +5693,27 @@
ervice)%0A
+ try:%0A
metr
@@ -5757,8 +5757,97 @@
out=
-1
+3
0)%0A
+ except AssertionError:%0A pytest.xfail(%22IMPALA-551: File handle leak for INSERT%22)%0A
|
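Wrapping the metric assertion and converting an AssertionError into pytest.xfail keeps a known upstream bug from failing the suite while still recording it. The same guard in isolation, with a hypothetical stand-in for the metric check:

import pytest

def open_file_count():
    # hypothetical stand-in for MetricVerifier.verify_no_open_files
    return 1

def test_known_leak():
    try:
        assert open_file_count() == 0
    except AssertionError:
        pytest.xfail("known file-handle leak, tracked upstream")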
39c32fa537f9f8f511efde608fe6c8e0a4311296
|
test working?
|
test_render_1.py
|
test_render_1.py
|
import sys
sys.path.append('../landsat-util/landsat')
import pytest
import render_1
import models
from sqlalchemy import create_engine, orm
from datetime import datetime
import mock
import unittest
import os
import factory
import factory.alchemy
Session = orm.scoped_session(orm.sessionmaker())
class JobFactory(factory.alchemy.SQLAlchemyModelFactory):
class Meta:
model = models.UserJob_Model
sqlalchemy_session = Session
jobstatus = 0
starttime = datetime.utcnow()
lastmodified = datetime.utcnow()
band1 = u'4'
band2 = u'3'
band3 = u'2'
entityid = u'LC80470272015005LGN00'
email = u'[email protected]'
jobid = factory.Sequence(lambda n: n)
@pytest.fixture(scope='session')
def connection(request):
engine = create_engine('postgresql://postgres@/test_bar')
models.Base.metadata.create_all(engine)
connection = engine.connect()
models.DBSession.registry.clear()
models.DBSession.configure(bind=connection)
models.Base.metadata.bind = engine
request.addfinalizer(models.Base.metadata.drop_all)
return connection
@pytest.fixture
def db_session(request, connection):
from transaction import abort
trans = connection.begin()
request.addfinalizer(trans.rollback)
request.addfinalizer(abort)
from models import DBSession
return DBSession
@pytest.fixture(scope='class')
def fake_job1(db_session):
model_instance = models.UserJob_Model(
jobstatus=0,
starttime=datetime.utcnow(),
lastmodified=datetime.utcnow()
)
db_session.add(model_instance)
db_session.flush()
#@pytest.mark.usefixtures("db_session")
#class BaseTest(object):
# def setup_method(self, method):
# self.config = testing.setUp()
#
# def teardown_method(self, method):
# transaction.abort()
# testing.tearDown
#
# --- test db functionality tests
def test_db_lookup(db_session):
model_instance = models.UserJob_Model(jobstatus=0,
starttime=datetime.utcnow(),
lastmodified=datetime.utcnow())
db_session.add(model_instance)
db_session.flush()
assert 1 == db_session.query(models.UserJob_Model).count()
def test_db_is_rolled_back(db_session):
assert 0 == db_session.query(models.UserJob_Model).count()
# --- process tests
#@pytest.mark.usefixtures("connection")
#@pytest.mark.usefixtures("db_session")
class TestProcess(unittest.TestCase):
fake_job_message = {u'job_id': u'1',
u'band_2': u'3',
u'band_3': u'2',
u'band_1': u'4',
u'scene_id': u'LC80470272015005LGN00',
u'email': u'[email protected]'}
def setUp(self):
self.session = Session
@mock.patch('landsat.downloader.Downloader')
def test_download_returns_correct_values(self, Downloader):
input_path, bands, scene_id = (render_1.download_and_set(
self.fake_job_message, render_1.PATH_DOWNLOAD))
self.assertEqual(input_path,
os.getcwd() + '/download/LC80470272015005LGN00')
self.assertEqual(bands, [u'4', u'3', u'2'])
self.assertEqual(scene_id, 'LC80470272015005LGN00')
@mock.patch('landsat.downloader.Downloader')
def test_download_updates_job_status(self, Downloader):
input_path, bands, scene_id = (render_1.download_and_set(
self.fake_job_message, render_1.PATH_DOWNLOAD))
job_f = JobFactory()
models.UserJob_Model.set_job_status(job_f.jobid, 1)
self.assertEqual(
[job_f], self.session.query(models.UserJob_Model).all()
)
self.session.commit()
import pdb; pdb.set_trace()
self.assertEqual(self.session.query.filter(models.UserJob_Model.jobid == 0))
def tearDown(self):
self.session.rollback()
Session.remove()
|
Python
| 0.000001 |
@@ -2367,17 +2367,16 @@
tests%0A%0A
-#
@pytest.
@@ -2406,17 +2406,16 @@
ction%22)%0A
-#
@pytest.
@@ -2817,24 +2817,25 @@
ession%0A%0A
+#
@mock.patch(
@@ -2839,37 +2839,55 @@
ch('
-landsat.downloader.Downloader
+recombinators_landsat.landsat_worker.render_1.b
')%0A
@@ -2930,36 +2930,24 @@
_values(self
-, Downloader
):%0A i
@@ -3291,16 +3291,17 @@
')%0A%0A
+#
@mock.pa
@@ -3309,37 +3309,55 @@
ch('
-landsat.downloader.Downloader
+recombinators_landsat.landsat_worker.render_1.b
')%0A
@@ -3404,20 +3404,8 @@
self
-, Downloader
):%0A
@@ -3718,24 +3718,24 @@
)%0A )%0A
+
self
@@ -3757,161 +3757,8 @@
()%0A%0A
- import pdb; pdb.set_trace()%0A %0A %0A %0A self.assertEqual(self.session.query.filter(models.UserJob_Model.jobid == 0))%0A%0A
|
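The retargeted patch strings follow the usual mock rule: patch a name where it is looked up, not where it is defined, because a module that did "from pkg import thing" holds its own reference that a patch on the defining module never touches. A self-contained demonstration with throwaway modules:

import sys
import types
from unittest import mock

# 'lib' defines f; 'app' imports it with "from lib import f"
lib = types.ModuleType('lib')
lib.f = lambda: 'real'
sys.modules['lib'] = lib

app = types.ModuleType('app')
exec("from lib import f\ndef run():\n    return f()", app.__dict__)
sys.modules['app'] = app

with mock.patch('lib.f', return_value='patched'):
    print(app.run())  # 'real' -- app still holds its own reference

with mock.patch('app.f', return_value='patched'):
    print(app.run())  # 'patched' -- patched where the name is looked up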
03fe3aad7358ee4593b9e8909d5374bae9e58b34
|
revert changes
|
denumerator/denumerator.py
|
denumerator/denumerator.py
|
#!/usr/bin/python
# pylint: disable=invalid-name
"""
--- dENUMerator ---
by bl4de | [email protected] | Twitter: @_bl4de | HackerOne: bl4de
Enumerates list of subdomains (output from tools like Sublist3r or subbrute)
and creates output file with servers responding on port 80/HTTP
This indicates (in most caes) working webserver
usage:
$ ./denumerator.py [domain_list_file]
"""
import sys
import requests
welcome = """
--- dENUMerator ---
usage:
$ ./denumerator.py [domain_list_file]
"""
requests.packages.urllib3.disable_warnings()
allowed_http_responses = [200, 302, 304, 401, 404, 403, 500]
http_ports_short_list = [80, 443, 8000, 8008, 8080, 9080]
http_ports_long_list = [80, 443, 591, 981, 1311, 4444,
4445, 7001, 7002, 8000, 8008, 8080, 8088, 8222, 8530, 8531, 8887, 8888, 9080, 16080, 18091]
def usage():
"""
prints welcome message
"""
print welcome
def send_request(proto, domain, port=80):
"""
sends request to check if server is alive
"""
protocols = {
'http': 'http://',
'https': 'https://'
}
full_url = protocols.get(proto.lower()) + domain + ":" + str(port)
resp = requests.get(full_url,
timeout=5,
allow_redirects=False,
verify=False,
headers={'Host': domain})
if resp.status_code in allowed_http_responses:
print '[+] domain {}:\t\t HTTP {}'.format(domain, resp.status_code)
output_file.write('{}\n'.format(domain))
return resp.status_code
def enumerate_domains(domains):
"""
enumerates domain from domains
"""
for d in domains:
# TODO: make selection of port(s) list or pass as option:
for port in http_ports_short_list:
try:
d = d.strip('\n').strip('\r')
return_code = send_request('http', d, port)
# if http not working on this port, try https
if return_code not in allowed_http_responses:
send_request('https', d, port)
except requests.exceptions.InvalidURL:
print '[-] {} is not a valid URL :/'.format(d)
except requests.exceptions.ConnectTimeout:
print '[-] {} :('.format(d)
continue
except requests.exceptions.ConnectionError:
print '[-] connection to {} aborted :/'.format(d)
except requests.exceptions.ReadTimeout:
print '[-] {} read timeout :/'.format(d)
except requests.exceptions.TooManyRedirects:
print '[-] {} probably went into redirects loop :('.format(d)
else:
pass
if len(sys.argv) < 2:
print welcome
exit(0)
domains = open(sys.argv[1].strip(), 'rw').readlines()
output_file = open('denumerator-{}-output.txt'.format(domains[0].strip()), 'w')
enumerate_domains(domains)
output_file.close()
|
Python
| 0 |
@@ -12,17 +12,16 @@
python%0A#
-
pylint:
@@ -598,229 +598,8 @@
0%5D%0A%0A
-http_ports_short_list = %5B80, 443, 8000, 8008, 8080, 9080%5D%0A%0Ahttp_ports_long_list = %5B80, 443, 591, 981, 1311, 4444,%0A 4445, 7001, 7002, 8000, 8008, 8080, 8088, 8222, 8530, 8531, 8887, 8888, 9080, 16080, 18091%5D%0A%0A
%0Adef
@@ -705,17 +705,8 @@
main
-, port=80
):%0A
@@ -853,19 +853,28 @@
-full_url =
+resp = requests.get(
prot
@@ -910,59 +910,8 @@
main
- + %22:%22 + str(port)%0A resp = requests.get(full_url
,%0A
@@ -1394,121 +1394,8 @@
ns:%0A
- # TODO: make selection of port(s) list or pass as option:%0A for port in http_ports_short_list:%0A
@@ -1403,20 +1403,16 @@
try:%0A
-
@@ -1457,20 +1457,16 @@
-
-
return_c
@@ -1497,20 +1497,10 @@
', d
-, port)%0A
+)%0A
@@ -1532,21 +1532,8 @@
king
- on this port
, tr
@@ -1540,20 +1540,16 @@
y https%0A
-
@@ -1614,20 +1614,16 @@
-
send_req
@@ -1641,14 +1641,8 @@
', d
-, port
)%0A%0A
@@ -1640,36 +1640,32 @@
s', d)%0A%0A
-
except requests.
@@ -1691,36 +1691,32 @@
RL:%0A
-
-
print '%5B-%5D %7B%7D is
@@ -1746,36 +1746,32 @@
rmat(d)%0A
-
except requests.
@@ -1801,36 +1801,32 @@
ut:%0A
-
-
print '%5B-%5D %7B%7D :(
@@ -1829,36 +1829,32 @@
%7D :('.format(d)%0A
-
cont
@@ -1858,36 +1858,32 @@
ontinue%0A
-
except requests.
@@ -1914,36 +1914,32 @@
or:%0A
-
-
print '%5B-%5D conne
@@ -1972,36 +1972,32 @@
rmat(d)%0A
-
except requests.
@@ -2024,36 +2024,32 @@
ut:%0A
-
-
print '%5B-%5D %7B%7D re
@@ -2077,28 +2077,24 @@
(d)%0A
-
-
except reque
@@ -2126,20 +2126,16 @@
irects:%0A
-
@@ -2212,22 +2212,14 @@
-
else:%0A
-
|
5c0a19386894e36898a48e7f10f01008e284e0c9
|
Update dependency bazelbuild/bazel to latest version
|
third_party/bazel.bzl
|
third_party/bazel.bzl
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is autogenerated by copybara, please do not edit.
bazel_version = "03719362d021a241ef9af04f33db6efcfd18590a"
bazel_sha256 = "eff6cd1c44a7c3ec63163b415383a4fb7db6c99dfcda1288a586df9671346512"
|
Python
| 0.000066 |
@@ -655,128 +655,128 @@
= %22
-03719362d021a241ef9af04f33db6efcfd18590a%22%0Abazel_sha256 = %22eff6cd1c44a7c3ec63163b415383a4fb7db6c99dfcda1288a586df9671346512
+f259b8abfd575f544635f57f3bb6678d566ef309%22%0Abazel_sha256 = %227e262ca5f5595a74d75953dfdcb75b271c2561a292972da7f3be449a3e8b28f6
%22%0A
|
05a2189224589ac84b14240bf96b110d7c531dfb
|
add missing parent class inherit
|
vimball/base.py
|
vimball/base.py
|
import bz2
import errno
import gzip
import lzma
import os
import re
import tempfile
def mkdir_p(path):
"""Create potentially nested directories as required.
Does nothing if the path already exists and is a directory.
"""
try:
os.makedirs(path)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
def is_vimball(fd):
"""Test for vimball archive format compliance.
Simple check to see if the first line of the file starts with standard
vimball archive header.
"""
fd.seek(0)
try:
header = fd.readline()
except UnicodeDecodeError:
# binary files will raise exceptions when trying to decode raw bytes to
# str objects in our readline() wrapper
return False
if re.match('^" Vimball Archiver', header) is not None:
return True
return False
class ArchiveError(Exception):
"""Catch-all archive error exception class."""
pass
class Vimball:
"""Vimball archive format."""
def __init__(self, path):
if not os.path.exists(path):
raise ArchiveError("path doesn't exist: '{}'".format(path))
self.path = path
_filebase, ext = os.path.splitext(path)
if ext == ".gz":
self.fd = gzip.open(path)
elif ext == ".bz2":
self.fd = bz2.BZ2File(path)
elif ext == ".xz":
self.fd = lzma.open(path)
else:
self.fd = open(path)
if not is_vimball(self.fd):
raise ArchiveError('invalid archive format')
def __del__(self):
try:
self.fd.close()
except AttributeError:
return
def readline(self):
"""Readline wrapper to force readline() to return str objects."""
line = self.fd.__class__.readline(self.fd)
if isinstance(line, bytes):
line = line.decode()
return line
@property
def files(self):
"""Yields archive file information."""
# try new file header format first, then fallback on old
for header in (r"(.*)\t\[\[\[1\n", r"^(\d+)\n$"):
header = re.compile(header)
filename = None
self.fd.seek(0)
line = self.readline()
while line:
m = header.match(line)
if m is not None:
filename = m.group(1)
try:
filelines = int(self.readline().rstrip())
except ValueError:
raise ArchiveError('invalid archive format')
filestart = self.fd.tell()
yield (filename, filelines, filestart)
line = self.readline()
if filename is not None:
break
def extract(self, extractdir=None, verbose=False):
"""Extract archive files to a directory."""
if extractdir is None:
filebase, ext = os.path.splitext(self.path)
if ext in ('.gz', '.bz2', '.xz'):
filebase, _ext = os.path.splitext(filebase)
extractdir = os.path.basename(filebase)
if os.path.exists(extractdir):
tempdir = tempfile.mkdtemp(prefix='vimball-', dir=os.getcwd())
extractdir = os.path.join(tempdir.split('/')[-1], extractdir)
self.fd.seek(0)
for filename, lines, offset in self.files:
filepath = os.path.join(extractdir, filename)
try:
directory = os.path.dirname(filepath)
mkdir_p(directory)
except OSError as e:
raise ArchiveError("failed creating directory '{}': {}".format(
directory, os.strerror(e.errno)))
with open(filepath, 'w') as f:
if verbose:
print(filepath)
self.fd.seek(offset)
for i in range(lines):
f.write(self.readline())
|
Python
| 0.000029 |
@@ -1023,16 +1023,24 @@
Vimball
+(object)
:%0A %22%22
|
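Adding the explicit object base matters under Python 2, where a bare class statement creates an old-style class with different method-resolution and descriptor semantics (properties, super(), and __slots__ only work on new-style classes); under Python 3 the two spellings are equivalent. In miniature:

class Old:            # old-style on Python 2, new-style on Python 3
    pass

class New(object):    # new-style everywhere
    pass

# On Python 2:
#   type(Old())  -> <type 'instance'>
#   type(New())  -> <class '__main__.New'>
# On Python 3 both report the class itself.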
24f5afff6b8e65c633521189f4ac6bf4fbacbdb7
|
Fix datapusher.wsgi to work with ckan-service-provider 0.0.2
|
deployment/datapusher.wsgi
|
deployment/datapusher.wsgi
|
import os
import sys
import hashlib
activate_this = os.path.join('/usr/lib/ckan/datapusher/bin/activate_this.py')
execfile(activate_this, dict(__file__=activate_this))
import ckanserviceprovider.web as web
import datapusher.jobs as jobs
os.environ['JOB_CONFIG'] = '/etc/ckan/datapusher_settings.py'
web.configure()
application = web.app
|
Python
| 0.000002 |
@@ -303,17 +303,12 @@
web.
-configure
+init
()%0Aa
|
efb420ddc6aa0052ecea6da84613da6e4cf1afc8
|
Update Bazel to latest version
|
third_party/bazel.bzl
|
third_party/bazel.bzl
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
bazel_version = "6fe70c2fef70b8a3da3aa3cbea26c6bf60f17e13"
bazel_sha256 = "ad525027ecc7056feb23fe96cfe8b28257a6c47a9d908e0bc4e0e0988bf61d28"
|
Python
| 0 |
@@ -591,128 +591,128 @@
= %22
-6fe70c2fef70b8a3da3aa3cbea26c6bf60f17e13%22%0Abazel_sha256 = %22ad525027ecc7056feb23fe96cfe8b28257a6c47a9d908e0bc4e0e0988bf61d28
+b017468d07da1e45282b9d153a4308fdace11eeb%22%0Abazel_sha256 = %22ce8dc5936238b6b7e27cdcdc13d481c94f20526fabfe20cbbceff17da83503e7
%22%0A
|
0bfa8373f82f801b34e7609f4ff6f28ab280a635
|
Change mnist.py to follow the latest spec
|
example/mnist/mnist.py
|
example/mnist/mnist.py
|
#!/usr/bin/env python
"""Chainer example: train a multi-layer perceptron on MNIST
This is a minimal example to write a feed-forward net. It requires scikit-learn
to load MNIST dataset.
"""
import numpy as np
import six
import chainer
from chainer import cuda, FunctionSet
import chainer.functions as F
from chainer import optimizers
class MNIST(object):
@staticmethod
def create(params):
self = MNIST()
if 'model_file_path' in params:
with open(params['model_file_path']) as model_pickle:
self.model = six.moves.cPickle.load(model_pickle)
else:
n_units = 1000
self.model = FunctionSet(
l1=F.Linear(784, n_units),
l2=F.Linear(n_units, n_units),
l3=F.Linear(n_units, 10))
if 'gpu' in params:
self.gpu = params['gpu']
else:
self.gpu = -1
self.prepare_gpu_and_optimizer()
return self
@staticmethod
def load(filepath, params):
with open(filepath, 'r') as f:
return six.moves.cPickle.load(f)
def prepare_gpu_and_optimizer(self):
if self.gpu >= 0:
cuda.init(self.gpu)
self.model.to_gpu()
# Setup optimizer
self.optimizer = optimizers.Adam()
self.optimizer.setup(self.model.collect_parameters())
def forward(self, x_data, train=True):
x = chainer.Variable(x_data)
h1 = F.dropout(F.relu(self.model.l1(x)), train=train)
h2 = F.dropout(F.relu(self.model.l2(h1)), train=train)
return self.model.l3(h2)
def fit(self, xys):
x = []
y = []
for d in xys:
x.append(d['data'])
y.append(d['label'])
x_batch = np.array(x, dtype=np.float32)
y_batch = np.array(y, dtype=np.int32)
if self.gpu >= 0:
x_batch = cuda.to_gpu(x_batch)
y_batch = cuda.to_gpu(y_batch)
self.optimizer.zero_grads()
y = self.forward(x_batch)
t = chainer.Variable(y_batch)
loss = F.softmax_cross_entropy(y, t)
acc = F.accuracy(y, t)
loss.backward()
self.optimizer.update()
nloss = float(cuda.to_cpu(loss.data)) * len(y_batch)
naccuracy = float(cuda.to_cpu(acc.data)) * len(y_batch)
retmap = {
'loss': nloss,
'accuracy': naccuracy,
}
return retmap
def predict(self, x):
# non batch
xx = []
xx.append(x)
x_data = np.array(xx, dtype=np.float32)
if self.gpu >= 0:
x_data = cuda.to_gpu(x_data)
y = self.forward(x_data, train=False)
y = y.data.reshape(y.data.shape[0], y.data.size / y.data.shape[0])
pred = y.argmax(axis=1)
return int(pred[0])
def get_model(self):
return self.model
def save(self, filepath, params):
with open(filepath, 'w') as f:
six.moves.cPickle.dump(self, f)
def load_model(self, model_data):
self.model = six.moves.cPickle.loads(str(model_data))
|
Python
| 0 |
@@ -387,21 +387,30 @@
create(
-param
+*args, **kwarg
s):%0A
@@ -456,29 +456,29 @@
le_path' in
-param
+kwarg
s:%0A
@@ -490,21 +490,21 @@
th open(
-param
+kwarg
s%5B'model
@@ -831,21 +831,21 @@
gpu' in
-param
+kwarg
s:%0A
@@ -862,21 +862,21 @@
f.gpu =
-param
+kwarg
s%5B'gpu'%5D
@@ -1012,37 +1012,46 @@
load(filepath,
-param
+*args, **kwarg
s):%0A with
@@ -2923,21 +2923,30 @@
lepath,
-param
+*args, **kwarg
s):%0A
|
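Switching the entry points to *args/**kwargs lets callers pass whatever the evolving spec sends; the method probes kwargs for what it understands and falls back to defaults otherwise. The shape of the pattern, reduced to a sketch:

class Model(object):
    @staticmethod
    def create(*args, **kwargs):
        self = Model()
        # optional settings come from kwargs, with explicit fallbacks
        self.gpu = kwargs['gpu'] if 'gpu' in kwargs else -1
        self.model_path = kwargs.get('model_file_path')  # None if absent
        return self

m = Model.create(gpu=0)
print(m.gpu, m.model_path)  # 0 None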
8959d982ddc810f9c226ce36884521cf979a61f1
|
add destroy cb
|
gui/tests/testicontheme.py
|
gui/tests/testicontheme.py
|
#!/usr/bin/env python
# doesnt work. segfault.
# TODO: other screens?
import pygtk
pygtk.require("2.0")
import gtk
import xfce4
widget = xfce4.gui.IconTheme(gtk.gdk.screen_get_default())
ic = widget.load("folder", 24)
print ic
icname = widget.lookup("folder", 24)
print icname
image = gtk.Image()
image.set_from_pixbuf(ic)
image.show()
w = gtk.Window()
w.add(image)
w.show()
gtk.main()
|
Python
| 0 |
@@ -348,16 +348,64 @@
indow()%0A
+w.connect(%22destroy%22, lambda x: gtk.main_quit())%0A
w.add(im
|
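Without the destroy handler, closing the window leaves gtk.main() blocked in its loop; connecting the window's destroy signal to gtk.main_quit is the standard PyGTK shutdown hook. The same hook in a minimal standalone program:

import pygtk
pygtk.require("2.0")
import gtk

w = gtk.Window()
w.connect("destroy", lambda widget: gtk.main_quit())
w.show()
gtk.main()  # returns once the window is closed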
bf9ca12295aa62cb3e0f1885b99d7c228072b0c7
|
fix 'has attrs, but not com.apple.FinderInfo' bug...
|
finder_colors.py
|
finder_colors.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
""" finder_colors.py - Setting colours of files/folders from the commandline.
Copyright (c) 2013 Daniel Fairhead <danthedeckie on github>
--------
Contributors:
- Panayotis Vryonis <vrypan on github>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
------------------
Usage:
$finder_colors.py <filename>
outputs which colour it's been set to.
$finder_colors.py <color> <filename>
sets the color.
------------------
You can also include this as a python module, and use the two functions:
set(filename, color)
and
get(filename)
which work pretty much as you'd expect.
"""
from __future__ import print_function
from xattr import xattr
from sys import argv, stderr
__version__ = '0.9.2'
_FINDER_INFO_TAG = u'com.apple.FinderInfo'
COLORS = {'none': 0, 'gray': 2, 'green': 4, 'purple': 6,
'blue': 8, 'yellow': 10, 'red': 12, 'orange': 14}
NAMES = {0: 'none', 2: 'gray', 4: 'green', 6: 'purple',
8: 'blue', 10 : 'yellow', 12 : 'red', 14 : 'orange' }
BLANK = 32*chr(0)
def get(filename):
''' Get OSX Finder Color (extended attribute) of path (file or folder) '''
try:
attrs = xattr(filename)
color_num = ord(attrs.get(_FINDER_INFO_TAG)[9]) & 14
# & 14 to mask with "1110" (ie ignore all other bits).
return NAMES[color_num]
except IOError as err:
if err.errno == 93: # attribute not found...
return NAMES[0]
# else
raise err
def set(filename, color): # pylint: disable=W0622
''' Set OSX Finder Color (extended attribute) of path (file or folder) '''
attrs = xattr(filename)
if _FINDER_INFO_TAG in attrs:
previous = attrs[_FINDER_INFO_TAG]
else:
previous = BLANK
prev_color_extra_bits = ord(previous[9]) & (255-14)
# these are all the bits in previous[9] not used for color.
new = previous[:9] \
+ chr(COLORS[color] \
+ prev_color_extra_bits) \
+ previous[10:]
attrs.set(_FINDER_INFO_TAG, new)
return new
###############################################################################
# If this is used as a stand-alone script:
if __name__ == '__main__':
def display(pathname):
''' display filename\tcolor '''
print(pathname, get(pathname), sep='\t')
def usage(): # pylint: disable=C0111
print ('Usage:\n\n'
'{0} <filename(s)>\n\n'
'to find out what colour a file is.\n'
'Output format is <filename><TAB><color><NEWLINE>\n\n'
'or\n\n'
'{0} [color] <filename(s)>\n\n'
'to set the color of those file(s).\n\n'
'Possible colors are:'.format(argv[0]))
print (*COLORS, sep=', ') # pylint: disable=W0142
try:
if len(argv) == 1: # No arguments, so display a usage message.
usage()
elif len(argv) == 2: # One argument, so presumably a file.
display(argv[1])
else: # At least 2 arguments...
# If there are more args, then the last one *could* be a color,
# in which case, set all preceding mentioned files to that color.
# Otherwise, if it's a pathname, then display it and all the
# other paths and their colors:
if argv[1] in COLORS:
for fn in argv[2:]:
set(fn, argv[1])
display(fn)
else:
for f in argv[1:]:
display(f)
except Exception as err: # pylint: disable=W0703
print(err, file=stderr)
if hasattr(err,'errno') and err.errno != 0:
exit(err.errno)
else:
exit(1)
|
Python
| 0 |
@@ -1712,17 +1712,17 @@
= '0.9.
-2
+3
'%0A%0A_FIND
@@ -2458,16 +2458,68 @@
aise err
+%0A except KeyError as err:%0A return NAMES%5B0%5D
%0A%0Adef se
|
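The new except clause covers files that carry extended attributes but no com.apple.FinderInfo entry, where the lookup raises KeyError rather than the IOError handled before; both cases now fall back to 'none'. The guard in isolation, using a plain dict as a stand-in for the xattr object:

def color_bits(attrs, tag=u'com.apple.FinderInfo'):
    """Return the Finder color bits, or 0 when the tag is absent."""
    try:
        return ord(attrs[tag][9]) & 14
    except (IOError, KeyError):
        return 0

print(color_bits({u'com.example.other': 32 * chr(0)}))  # 0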
23c8044b84557dea940d527213022bfa19d28293
|
test that Human is in Ensembl species
|
tests/test_ensembl_species_service.py
|
tests/test_ensembl_species_service.py
|
#
# Author : Manuel Bernal Llinares
# Project : trackhub-creator
# Timestamp : 04-07-2017 09:14
# ---
# © 2017 Manuel Bernal Llinares <[email protected]>
# All rights reserved.
#
"""
Unit Tests for Ensembl Species Service
"""
import unittest
# App modules
import ensembl.service
class TestEnsemblSpeciesService(unittest.TestCase):
__NCB_TAXONOMY_HUMAN = 9606
def setUp(self):
self.ensembl_service = ensembl.service.get_service()
def test_get_species_data(self):
species_data_service = self.ensembl_service.get_species_data_service()
self.assertIsNotNone(species_data_service.get_species_data(),
"Requested RAW species data from Ensembl IS NOT None")
def test_count_of_species(self):
self.assertNotEqual(self.ensembl_service.get_species_data_service().count_ensembl_species(),
0,
"Ensembl has a non-zero number of species")
def test_human_species_is_present(self):
"""
Test that Human taxonomy is present, this unit test is also testing the indexing mechanism
:return: no returned value
"""
#TODO
pass
if __name__ == '__main__':
print("ERROR: This script is part of a pipeline collection and it is not meant to be run in stand alone mode")
|
Python
| 0.999977 |
@@ -367,17 +367,15 @@
N =
+'
9606
+'
%0A
-
%0A
@@ -1182,26 +1182,223 @@
-#TODO%0A pass
+self.assertIsNotNone(%0A self.ensembl_service.get_species_data_service().get_species_entry_for_taxonomy_id(%0A self.__NCB_TAXONOMY_HUMAN), %22Human NCBI taxonomy is in species data from Ensembl%22)
%0A%0A%0Ai
|
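The fix stores the taxonomy id as the string '9606', suggesting the service's species index is keyed by string ids (inferred from the diff), so an integer key would never match. Reduced to a sketch with a hypothetical index:

species_by_taxonomy = {'9606': {'name': 'homo_sapiens'}}  # hypothetical index

print(species_by_taxonomy.get(9606))    # None -- int key misses
print(species_by_taxonomy.get('9606'))  # {'name': 'homo_sapiens'}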
d977a9ee9814264bd1d3080cadcd7e43b7c1d27e
|
Revert changes
|
examples/News/news2.py
|
examples/News/news2.py
|
#!/usr/bin/env python
from Kiwi2 import Delegates
from Kiwi2.Widgets.List import List, Column
from Kiwi2.initgtk import gtk
class NewsItem:
"""An instance that holds information about a news article."""
def __init__(self, title, author, url):
self.title, self.author, self.url = title, author, url
# Assemble friendly Pigdog.org news into NewsItem instances so they can
# be used in the CListDelegate
news = [
NewsItem("Smallpox Vaccinations for EVERYONE", "JRoyale",
"http://www.pigdog.org/auto/Power_Corrupts/link/2700.html"),
NewsItem("Is that uranium in your pocket or are you just happy to see me?",
"Baron Earl",
"http://www.pigdog.org/auto/bad_people/link/2699.html"),
NewsItem("Cut 'n Paste", "Baron Earl",
"http://www.pigdog.org/auto/ArtFux/link/2690.html"),
NewsItem("A Slippery Exit", "Reverend CyberSatan",
"http://www.pigdog.org/auto/TheCorporateFuck/link/2683.html"),
NewsItem("Those Crazy Dutch Have Resurrected Elvis", "Miss Conduct",
"http://www.pigdog.org/auto/viva_la_musica/link/2678.html")
]
# Specify the columns: one for each attribute of NewsItem, the URL
# column invisible by default
my_columns = [ Column("title", sorted=True),
Column("author", justify=gtk.JUSTIFY_RIGHT),
Column("url", title="URL", visible=False) ]
kiwilist = List(my_columns, news)
w = gtk.Window()
w.set_size_request(600, 250)
w.add(kiwilist)
w.show_all()
gtk.main()
|
Python
| 0.000001 |
@@ -1267,35 +1267,8 @@
hor%22
-, justify=gtk.JUSTIFY_RIGHT
), %0A
@@ -1365,60 +1365,49 @@
ws)%0A
-w = gtk.Window()%0Aw.set_size_request(600, 250)%0Aw.add(
+slave = Delegates.SlaveDelegate(toplevel=
kiwi
@@ -1412,17 +1412,21 @@
wilist)%0A
-w
+slave
.show_al
|
7c91d556220088ea5286611f3674aaa88f3a6340
|
Add failing test for "Crash if session was flushed before commit (with validity strategy)"
|
tests/test_exotic_operation_combos.py
|
tests/test_exotic_operation_combos.py
|
from six import PY3
from tests import TestCase
class TestExoticOperationCombos(TestCase):
def test_insert_deleted_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 0
def test_insert_deleted_and_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2 = self.Article(id=article.id, name=u'Some article')
self.session.add(article2)
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
def test_insert_flushed_object(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
self.session.add(article)
self.session.flush()
self.session.commit()
assert article.versions.count() == 1
assert article.versions[0].operation_type == 0
def test_replace_deleted_object_with_update(self):
article = self.Article()
article.name = u'Some article'
article.content = u'Some content'
article2 = self.Article()
article2.name = u'Another article'
article2.content = u'Some other content'
self.session.add(article)
self.session.add(article2)
self.session.commit()
self.session.delete(article)
self.session.flush()
article2.id = article.id
self.session.commit()
assert article2.versions.count() == 2
assert article2.versions[0].operation_type == 0
assert article2.versions[1].operation_type == 1
|
Python
| 0 |
@@ -2242,28 +2242,147 @@
ions%5B1%5D.operation_type == 1%0A
+%0A%0Aclass TestExoticOperationCombosWithValidityStrategy(TestExoticOperationCombos):%0A versioning_strategy = 'validity'%0A
|
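
The entire fix is a two-line subclass: overriding one class attribute makes unittest re-run every inherited test under the 'validity' versioning strategy, reproducing the reported crash without duplicating any test bodies. A minimal sketch of that reuse pattern, with a generic `strategy` knob standing in for SQLAlchemy-Continuum's `versioning_strategy`:

import unittest

class StrategyTests(unittest.TestCase):
    strategy = 'subquery'          # configuration knob the fixtures read

    def test_strategy_is_supported(self):
        self.assertIn(self.strategy, ('subquery', 'validity'))

# One overridden attribute re-runs every inherited test under the
# alternative configuration; no test bodies are duplicated.
class StrategyTestsWithValidity(StrategyTests):
    strategy = 'validity'

if __name__ == '__main__':
    unittest.main()
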
e816b1f63c299141c6ad907c860d2c5411829405
|
Simplify aggregator code
|
aleph/analysis/aggregate.py
|
aleph/analysis/aggregate.py
|
import logging
from Levenshtein import setmedian
from aleph.analysis.util import tag_key
from aleph.analysis.util import TAG_COUNTRY, TAG_LANGUAGE, TAG_PHONE
from aleph.analysis.util import TAG_PERSON, TAG_COMPANY
log = logging.getLogger(__name__)
class TagAggregator(object):
MAX_TAGS = 10000
CUTOFFS = {
TAG_COUNTRY: .2,
TAG_LANGUAGE: .3,
TAG_PERSON: .003,
TAG_COMPANY: .003,
TAG_PHONE: .05,
}
def __init__(self):
self.tags = {}
self.types = {}
def add(self, type_, tag):
key = tag_key(tag)
if key is None:
return
if (key, type_) not in self.tags:
self.tags[(key, type_)] = []
self.tags[(key, type_)].append(tag)
if type_ not in self.types:
if len(self.types) > self.MAX_TAGS:
return
self.types[type_] = 0
self.types[type_] += 1
def type_cutoff(self, type_):
freq = self.CUTOFFS.get(type_, 0)
return self.types.get(type_, 0) * freq
@property
def entities(self):
for (key, type_), tags in self.tags.items():
# skip entities that do not meet a threshold of relevance:
cutoff = self.type_cutoff(type_)
if len(tags) < cutoff:
continue
label = tags[0]
if type_ in (TAG_COMPANY, TAG_PERSON) and len(set(tags)) > 0:
label = setmedian(tags)
yield label, type_
def __len__(self):
return len(self.tags)
|
Python
| 0.000073 |
@@ -17,36 +17,80 @@
rom
-Levenshtein import setmedian
+collections import defaultdict%0Afrom followthemoney.types import registry
%0A%0Afr
@@ -175,22 +175,8 @@
TRY,
- TAG_LANGUAGE,
TAG
@@ -239,17 +239,16 @@
OMPANY%0A%0A
-%0A
log = lo
@@ -365,34 +365,8 @@
TRY:
- .2,%0A TAG_LANGUAGE:
.3,
@@ -491,17 +491,34 @@
elf.
-tags = %7B%7D
+values = defaultdict(list)
%0A
@@ -539,10 +539,24 @@
s =
-%7B%7D
+defaultdict(int)
%0A%0A
@@ -575,18 +575,19 @@
lf,
-type_, tag
+prop, value
):%0A
@@ -607,19 +607,21 @@
tag_key(
-tag
+value
)%0A
@@ -679,135 +679,13 @@
ey,
-type_) not in self.tags:%0A self.tags%5B(key, type_)%5D = %5B%5D%0A self.tags%5B(key, type_)%5D.append(tag)%0A%0A if type_
+prop)
not
@@ -693,19 +693,20 @@
in self.
-typ
+valu
es:%0A
@@ -725,19 +725,20 @@
en(self.
-typ
+valu
es) %3E se
@@ -785,33 +785,46 @@
-
-
self.
-types%5Btype_%5D = 0
+values%5B(key, prop)%5D.append(value)
%0A
@@ -839,21 +839,20 @@
f.types%5B
-type_
+prop
%5D += 1%0A%0A
@@ -859,20 +859,20 @@
def
-type
+prop
_cutoff(
@@ -877,21 +877,20 @@
f(self,
-type_
+prop
):%0A
@@ -912,29 +912,28 @@
CUTOFFS.get(
-type_
+prop
, 0)%0A
@@ -955,21 +955,20 @@
pes.get(
-type_
+prop
, 0) * f
@@ -1027,18 +1027,15 @@
or (
-key, type_
+_, prop
), t
@@ -1046,19 +1046,21 @@
in self.
-tag
+value
s.items(
@@ -1163,20 +1163,20 @@
elf.
-type
+prop
_cutoff(
type
@@ -1171,21 +1171,20 @@
_cutoff(
-type_
+prop
)%0A
@@ -1281,21 +1281,20 @@
if
-type_
+prop
in (TAG
@@ -1318,31 +1318,8 @@
SON)
- and len(set(tags)) %3E 0
:%0A
@@ -1344,17 +1344,26 @@
l =
-setmedian
+registry.name.pick
(tag
@@ -1394,13 +1394,12 @@
el,
-type_
+prop
%0A%0A
@@ -1443,14 +1443,16 @@
en(self.
-tag
+value
s)%0A
|
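
The rewrite swaps the hand-rolled `if key not in dict` bookkeeping for `collections.defaultdict` and renames `type_`/`tag` to `prop`/`value`. A runnable sketch of the simplified aggregation core; `tag_key` is approximated by lower-casing here, since the real normalizer lives in `aleph.analysis.util` and is not shown in this record, and the MAX_TAGS guard is omitted for brevity:

from collections import defaultdict

class TagAggregator:
    def __init__(self):
        self.values = defaultdict(list)   # (key, prop) -> raw values seen
        self.types = defaultdict(int)     # prop -> how many values were added

    def add(self, prop, value):
        key = value.strip().lower()       # stand-in for the real tag_key()
        if not key:
            return
        self.values[(key, prop)].append(value)
        self.types[prop] += 1

agg = TagAggregator()
agg.add('person', 'Ada Lovelace')
agg.add('person', 'ada lovelace')
assert agg.types['person'] == 2
assert len(agg.values[('ada lovelace', 'person')]) == 2
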
30f55607990a356f49c03f04a707aa7d59a8eedf
|
Remove unnecessary extra parens
|
examples/chatserver.py
|
examples/chatserver.py
|
#!/usr/bin/env python
"""Chat Server Example
This example demonstrates how to create a very simple telnet-style chat
server that supports many connecting clients.
"""
from optparse import OptionParser
from circuits import Component, Debugger
from circuits.net.sockets import TCPServer, Write
__version__ = "0.0.1"
USAGE = "%prog [options]"
VERSION = "%prog v" + __version__
def parse_options():
parser = OptionParser(usage=USAGE, version=VERSION)
parser.add_option(
"-b", "--bind",
action="store", type="string",
default="0.0.0.0:8000", dest="bind",
help="Bind to address:[port]"
)
parser.add_option(
"-d", "--debug",
action="store_true",
default=False, dest="debug",
help="Enable debug mode"
)
opts, args = parser.parse_args()
return opts, args
class ChatServer(Component):
def init(self, args, opts):
"""Initialize our ``ChatServer`` Component.
This uses the convenience ``init`` method which is called after the
        component is properly constructed and initialized and passed the
same args and kwargs that were passed during construction.
"""
self.args = args
self.opts = opts
self.clients = {}
if opts.debug:
Debugger().register(self)
if ":" in opts.bind:
address, port = opts.bind.split(":")
port = int(port)
else:
address, port = opts.bind, 8000
bind = (address, port)
TCPServer(bind).register(self)
def broadcast(self, data, exclude=None):
exclude = exclude or []
targets = (sock for sock in self.clients.keys() if sock not in exclude)
for target in targets:
self.fire(Write(target, data))
def connect(self, sock, host, port):
"""Connect Event -- Triggered for new connecting clients"""
self.clients[sock] = {
"host": sock,
"port": port,
"state": {
"nickname": None,
"registered": False
}
}
self.fire(Write(sock, "Welcome to the circuits Chat Server!\n"))
self.fire(Write(sock, "Please enter a desired nickname: "))
def disconnect(self, sock):
"""Disconnect Event -- Triggered for disconnecting clients"""
nickname = self.clients[sock]["state"]["nickname"]
self.broadcast("!!! {0:s} has left !!!\n".format(nickname),
exclude=[sock])
del self.clients[sock]
def read(self, sock, data):
"""Read Event -- Triggered for when client conenctions have data"""
if not self.clients[sock]["state"]["registered"]:
nickname = data.strip()
self.clients[sock]["state"]["registered"] = True
self.clients[sock]["state"]["nickname"] = nickname
self.broadcast("!!! {0:s} has joined !!!\n".format(nickname),
exclude=[sock])
else:
nickname = self.clients[sock]["state"]["nickname"]
self.broadcast("<{0:s}> {1:s}\n".format(nickname, data.strip()),
exclude=[sock])
def main():
opts, args = parse_options()
# Configure and "run" the System.
(ChatServer(args, opts)).run()
if __name__ == "__main__":
main()
|
Python
| 0.000004 |
@@ -3284,17 +3284,16 @@
em.%0A
-(
ChatServ
@@ -3306,17 +3306,16 @@
s, opts)
-)
.run()%0A%0A
|
0f2336bf9b190009a83c077671fa65f5e6c53f8b
|
Add error handling
|
server/python/server.py
|
server/python/server.py
|
import os
import json
from get_discovery_collections import get_constants
from flask import Flask, jsonify, render_template, request
from flask_cors import CORS
from dotenv import load_dotenv, find_dotenv
import watson_developer_cloud.natural_language_understanding.features.v1 as features # noqa
from watson_developer_cloud import DiscoveryV1, NaturalLanguageUnderstandingV1
try:
load_dotenv(find_dotenv())
except IOError:
print('warning: no .env file loaded')
app = Flask(
__name__,
static_folder="../../client/knowledge_base_search/build/static",
template_folder="../../client/knowledge_base_search/build"
)
CORS(app, resources={r"/api/*": {"origins": "*"}})
# Discovery
discovery = DiscoveryV1(
url=os.getenv('DISCOVERY_URL'),
username=os.getenv('DISCOVERY_USERNAME'),
password=os.getenv('DISCOVERY_PASSWORD'),
version="2016-12-01"
)
# NLU
nlu = NaturalLanguageUnderstandingV1(
url=os.getenv('NLU_URL'),
username=os.getenv('NLU_USERNAME'),
password=os.getenv('NLU_PASSWORD'),
version="2017-02-27"
)
# retrieve the following:
# {
# environment_id,
# collection_id_regular,
# collection_id_enriched
# }
constants = get_constants(
discovery,
regular_name=os.getenv(
'DISCOVERY_REGULAR_COLLECTION_NAME',
'knowledge_base_regular'
),
enriched_name=os.getenv(
'DISCOVERY_ENRICHED_COLLECTION_NAME',
'knowledge_base_enriched'
)
)
@app.route('/')
def index():
return render_template('index.html')
@app.route('/api/environments')
def get_environments():
return jsonify(discovery.get_environments())
@app.route('/api/environments/<environment_id>')
def get_environment(environment_id):
return jsonify(discovery.get_environment(environment_id=environment_id))
@app.route('/api/environments/<environment_id>/configurations')
def list_configurations(environment_id):
return jsonify(
discovery.list_configurations(
environment_id=environment_id
)
)
@app.route('/api/environments/<environment_id>/configurations/<configuration_id>') # noqa
def get_configuration(environment_id, configuration_id):
return jsonify(
discovery.get_configuration(
environment_id=environment_id,
configuration_id=configuration_id
)
)
@app.route('/api/environments/<environment_id>/collections')
def list_collections(environment_id):
return jsonify(discovery.list_collections(environment_id=environment_id))
@app.route('/api/environments/<environment_id>/collections/<collection_id>')
def get_collection(environment_id, collection_id):
return jsonify(
discovery.get_collection(
environment_id=environment_id,
collection_id=collection_id
)
)
def get_enriched_query(question):
response = nlu.analyze(text=question, features=[features.Keywords()])
keywords = response.get('keywords', [])
query = ','.join(map(lambda keyword: keyword['text'], keywords))
if len(query) > 0:
return {'query': 'enriched_title.keywords.text:' + query}
else:
return {'query': question}
@app.route('/api/query/<collection_type>', methods=['POST'])
def query(collection_type):
collection_id_key = 'collection_id_regular'
query_options = json.loads(request.data)
if collection_type == 'enriched':
collection_id_key = 'collection_id_enriched'
query_options = get_enriched_query(query_options['query'])
return jsonify(
discovery.query(
environment_id=constants['environment_id'],
collection_id=constants[collection_id_key],
query_options=query_options
)
)
if __name__ == '__main__':
# If we are in the Bluemix environment, set port to 0.0.0.0
# otherwise set it to localhost (127.0.0.1)
HOST = '0.0.0.0' if os.getenv('VCAP_APPLICATION') else '127.0.0.1'
# Get port from the Bluemix environment, or default to 5000
PORT_NUMBER = int(os.getenv('PORT', '5000'))
app.run(host=HOST, port=PORT_NUMBER, debug=False)
|
Python
| 0.000002 |
@@ -154,16 +154,58 @@
rt CORS%0A
+from requests.exceptions import HTTPError%0A
from dot
@@ -4104,16 +4104,262 @@
)%0A%0A%0A
[email protected](Exception)%0Adef handle_error(e):%0A code = 500%0A error = 'Error processing the request'%0A if isinstance(e, HTTPError):%0A code = e.code%0A error = str(e.message)%0A%0A return jsonify(error=error, code=code), code%0A%0A%0A
if __nam
|
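
Decoded, the second hunk registers a catch-all `@app.errorhandler(Exception)` that turns any unhandled exception into a JSON error body with a matching status code. A minimal self-contained sketch of the idea; it assumes that an exception's `code` attribute, when present, is an integer status, and note that Flask's debug mode re-raises exceptions instead of routing them here:

from flask import Flask, jsonify

app = Flask(__name__)

@app.errorhandler(Exception)
def handle_error(e):
    # HTTP-aware exceptions (e.g. werkzeug's) carry an integer status code;
    # anything else becomes a generic 500.
    code = getattr(e, 'code', None) or 500
    return jsonify(error=str(e), code=code), code

@app.route('/boom')
def boom():
    raise RuntimeError('error processing the request')
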
8c6ebf17541e48e6d4fdd9d521a4391ce621f301
|
Use getNextSchedule in ScheduleDao
|
broadcast_api.py
|
broadcast_api.py
|
from mysql import mysql
from mysql import DB_Exception
from datetime import date
from datetime import datetime
from datetime import timedelta
import os.path
import json
#The API load schedule.txt and find out the first image which has not print and the time limit still allow
def load_schedule():
try:
return_msg = {}
return_msg["result"] = "fail"
schedule_dir = ""
sche_target_id = ""
type_id = ""
system_file_name = ""
#connect to mysql
db = mysql()
db.connect()
#find schedule
sql = ("SELECT sche_id, sche_target_id, sche_display_time FROM schedule WHERE sche_is_used=0 ORDER BY sche_sn ASC LIMIT 1")
pure_result = db.query(sql)
try:
return_msg["schedule_id"] = pure_result[0][0]
sche_target_id = pure_result[0][1]
return_msg["display_time"] = int(pure_result[0][2])
except:
db.close()
return_msg["error"] = "no schedule"
return return_msg
#find the file
if sche_target_id[0:4]=="imge":
sql = ("SELECT type_id, img_system_name, img_like_count FROM image_data WHERE img_id=\"" + sche_target_id + "\" ")
return_msg["file_type"] = "image"
elif sche_target_id[0:4]=="text":
sql = ("SELECT type_id, text_system_name, text_like_count FROM text_data WHERE text_id=\"" + sche_target_id + "\" ")
return_msg["file_type"] = "text"
else :
db.close()
return_msg["error"] = "target id type error"
return return_msg
pure_result = db.query(sql)
try:
type_id = int(pure_result[0][0])
system_file_name = pure_result[0][1]
return_msg["like_count"] = int(pure_result[0][2])
except:
db.close()
return_msg["error"] = "no file record"
return return_msg
#find type dir
sql = ("SELECT type_dir, type_name FROM data_type WHERE type_id=" + str(type_id))
pure_result = db.query(sql)
try:
schedule_dir = os.path.join(schedule_dir, "static/")
schedule_dir = os.path.join(schedule_dir, pure_result[0][0])
schedule_dir = os.path.join(schedule_dir, system_file_name)
return_msg["file"] = os.path.join(pure_result[0][0], system_file_name)
return_msg["type_name"] = str(pure_result[0][1])
except:
db.close()
return_msg["error"] = "no type record"
return return_msg
#if text read file
if return_msg["file_type"] == "text":
if not os.path.isfile(schedule_dir) :
db.close()
return_msg["error"] = "no file"
return return_msg
else :
with open(schedule_dir,"r") as fp:
file_content = json.load(fp)
return_msg["file_text"] = file_content
#update display count
if return_msg["file_type"] == "image":
sql = "UPDATE image_data SET img_display_count=img_display_count+1 WHERE img_id='"+sche_target_id+"'"
elif return_msg["file_type"] == "text":
sql = "UPDATE text_data SET text_display_count=text_display_count+1 WHERE text_id='"+sche_target_id+"'"
db.cmd(sql)
return_msg["result"] = "success"
return return_msg
except DB_Exception as e:
db.close()
return_msg["error"] = e.args[1]
return return_msg
|
Python
| 0 |
@@ -553,16 +553,21 @@
#find
+next
schedule
@@ -579,467 +579,406 @@
-sql = (%22SELECT sche_id, sche_target_id, sche_display_time FROM schedule WHERE sche_is_used=0 ORDER BY sche_sn ASC LIMIT 1%22)%0A pure_result = db.query(sql)%0A try:%0A return_msg%5B%22schedule_id%22%5D = pure_result%5B0%5D%5B0%5D%0A sche_target_id = pure_result%5B0%5D%5B1%5D%0A return_msg%5B%22display_time%22%5D = int(pure_result%5B0%5D%5B2%5D)%0A except:%0A db.close()%0A return_msg%5B%22error%22%5D = %22no schedule%22%0A return return_msg%0A
+with ScheduleDao() as scheduleDao:%0A next_schedule = scheduleDao.getNextSchedule()%0A if next_schedule is None:%0A return_msg%5B%22error%22%5D = %22no schedule%22%0A return return_msg%0A return_msg%5B%22schedule_id%22%5D = next_schedule%5B'schedule_id'%5D%0A sche_target_id = next_schedule%5B'sche_target_id'%5D%0A return_msg%5B%22display_time%22%5D = int(next_schedule%5B'display_time'%5D)
%0A%0A
|
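
The replacement hides the raw `SELECT ... ORDER BY sche_sn ASC LIMIT 1` behind a `ScheduleDao` used as a context manager. The DAO itself is not part of this record, so the sketch below is a hypothetical reconstruction of the shape the calling code implies: `__enter__`/`__exit__` own the connection, and `getNextSchedule()` returns a dict, or None when nothing is queued:

class ScheduleDao(object):
    """Hypothetical sketch; the real DAO lives elsewhere in the project."""

    def __enter__(self):
        self._db = self._connect()      # acquire the DB handle
        return self

    def __exit__(self, exc_type, exc, tb):
        self._db.close()                # always released, even on error
        return False                    # do not swallow exceptions

    def _connect(self):
        class _FakeDb(object):          # stand-in so the sketch runs
            def close(self):
                pass
        return _FakeDb()

    def getNextSchedule(self):
        # The real implementation fetches the first unused schedule row;
        # returning None signals that nothing is queued.
        return {'schedule_id': 'sche0001',
                'sche_target_id': 'imge0001',
                'display_time': 30}

with ScheduleDao() as dao:
    next_schedule = dao.getNextSchedule()
    assert next_schedule is None or 'schedule_id' in next_schedule
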
ba127d99871cde38d08c12091fda03381942f461
|
remove petsc vec for now
|
tests/test_solve/test_slepc_solver.py
|
tests/test_solve/test_slepc_solver.py
|
# TODO: TEST NON_HERMITIAN
# TODO: TEST multiprocessing throws no error with petsc
from pytest import fixture, mark
import numpy as np
import scipy.sparse as sp
from numpy.testing import assert_allclose
from quimb import (
qu,
rand_uni,
ldmul,
rand_matrix,
rand_herm,
seigsys,
overlap,
eye,
)
from quimb.solve import SLEPC4PY_FOUND
from quimb.solve.scipy_solver import scipy_svds
if SLEPC4PY_FOUND:
from quimb.solve.slepc_solver import (
slepc_seigsys,
slepc_svds,
convert_to_petsc,
new_petsc_vec)
slepc4py_notfound_msg = "No SLEPc4py installation"
slepc4py_test = mark.skipif(not SLEPC4PY_FOUND, reason=slepc4py_notfound_msg)
@fixture
def prematsparse():
u = rand_uni(4)
a = u @ ldmul(np.array([-1, 2, 4, -3]), u.H)
a = qu(a, sparse=True)
return u, a
@fixture
def bigsparsemat():
return rand_matrix(100, sparse=True, density=0.1)
# --------------------------------------------------------------------------- #
# TESTS #
# --------------------------------------------------------------------------- #
@slepc4py_test
class TestConvertToPETScConversion:
def test_csr(self):
a = rand_matrix(2, sparse=True, density=0.5)
b = convert_to_petsc(a)
assert b.getType() == 'seqaij'
def test_bsr(self):
a = sp.kron(rand_matrix(2), eye(2, sparse=True), format='bsr')
b = convert_to_petsc(a)
assert b.getType() == 'seqbaij'
assert b.getBlockSize() == 2
def test_vec(self):
a = np.array([1, 2, 3, 4])
b = convert_to_petsc(a)
assert_allclose(b.getArray(), a)
def test_dense(self):
a = rand_matrix(3)
b = convert_to_petsc(a)
assert b.getType() == 'seqdense'
def test_new_petsc_vector(self):
a = new_petsc_vec(4)
assert a.getArray() is not None
@slepc4py_test
class TestSlepcSeigsys:
def test_internal_eigvals(self, prematsparse):
u, a = prematsparse
lk = slepc_seigsys(a, k=2, sigma=0.5, return_vecs=False)
assert_allclose(lk, [-1, 2])
@mark.parametrize("which, output", [
('lm', 4),
("sa", -3),
])
def test_slepc_seigsys_groundenergy(self, prematsparse, which, output):
u, a = prematsparse
lk = slepc_seigsys(a, k=1, which=which, return_vecs=False)
assert_allclose(lk, output)
def test_slepc_seigsys_eigvecs(self):
h = rand_herm(100, sparse=True, density=0.2)
lks, vks = slepc_seigsys(h, k=5)
lka, vka = seigsys(h, k=5)
assert vks.shape == vka.shape
for ls, vs, la, va in zip(lks, vks.T, lka, vka.T):
assert_allclose(ls, la)
assert_allclose(overlap(vs, va), 1.0)
def test_aeigvals_all_consecutive(self):
# TODO ************************************************************** #
# h = ham_heis(n=10, sparse=True)
pass
@slepc4py_test
class TestSlepcSvds:
def test_simple(self, prematsparse):
u, a = prematsparse
lk = slepc_svds(a, k=1, return_vecs=False)
assert_allclose(lk, 4)
@mark.parametrize("SVDType", ['cross', 'lanczos'])
def test_random_compare_scipy(self, bigsparsemat, SVDType):
a = bigsparsemat
lk = slepc_svds(a, k=5, return_vecs=False, SVDType=SVDType)
ls = scipy_svds(a, k=5, return_vecs=False)
assert_allclose(lk, ls)
@mark.parametrize("SVDType", ['cross', 'lanczos'])
def test_unitary_vectors(self, bigsparsemat, SVDType):
a = bigsparsemat
uk, sk, vk = slepc_svds(a, k=10, return_vecs=True, SVDType=SVDType)
assert_allclose(uk.H @ uk, eye(10), atol=1e-6)
assert_allclose(vk @ vk.H, eye(10), atol=1e-6)
pk, lk, qk = scipy_svds(a, k=10, return_vecs=True)
assert_allclose(sk, lk)
assert pk.shape == uk.shape
assert vk.shape == qk.shape
assert_allclose(abs(uk.H @ pk), eye(10), atol=1e-7)
assert_allclose(abs(qk @ vk.H), eye(10), atol=1e-7)
|
Python
| 0 |
@@ -1567,24 +1567,26 @@
() == 2%0A%0A
+ #
def test_ve
@@ -1589,32 +1589,34 @@
t_vec(self):%0A
+ #
a = np.arra
@@ -1630,24 +1630,26 @@
, 3, 4%5D)%0A
+ #
b = con
@@ -1660,32 +1660,34 @@
_to_petsc(a)%0A
+ #
assert_allc
|
ca06a55d096eb4c67bf70c479107128b73087ab9
|
integrate update
|
w1_integrate.py
|
w1_integrate.py
|
from sympy import integrate, symbols, log
# if 0 <= x < 0.25:
# return float(0)
# elif 0.25 <= x < 0.5:
# return 16.0 * (x - 0.25)
# elif 0.5 <= x < 0.75:
# return -16.0 * (x - 0.75)
# elif 0.75 < x <= 1:
# return float(0)
# h(f) = integrate(-f(x)lnf(x), (x, 0, 1))
x = symbols('x')
left = integrate(-16.0 * (x - 0.25) * log(16.0 * (x - 0.25)), (x, 0.25, 0.5))
right = integrate(16.0 * (x - 0.75) * log(-16.0 * (x - 0.75)), (x, 0.5, 0.75))
print 'left {0}'.format(left)
print 'right {0}'.format(right)
print 'all {0}'.format(left + right)
|
Python
| 0 |
@@ -34,17 +34,19 @@
ols, log
-%0A
+%0D%0A%0D
%0A# if 0
@@ -57,16 +57,17 @@
%3C 0.25:
+%0D
%0A# r
@@ -80,16 +80,17 @@
float(0)
+%0D
%0A# elif
@@ -105,16 +105,17 @@
x %3C 0.5:
+%0D
%0A# r
@@ -137,16 +137,17 @@
- 0.25)
+%0D
%0A# elif
@@ -162,16 +162,17 @@
%3C 0.75:
+%0D
%0A# r
@@ -195,16 +195,17 @@
- 0.75)
+%0D
%0A# elif
@@ -218,16 +218,17 @@
x %3C= 1:
+%0D
%0A# r
@@ -241,16 +241,17 @@
float(0)
+%0D
%0A# h(f)
@@ -285,17 +285,19 @@
, 0, 1))
-%0A
+%0D%0A%0D
%0Ax = sym
@@ -305,16 +305,17 @@
ols('x')
+%0D
%0Aleft =
@@ -384,16 +384,17 @@
5, 0.5))
+%0D
%0Aright =
@@ -468,24 +468,88 @@
75))
-%0Aprint
+%0D%0A%0D%0Awith open('w1_integrate_result.txt', 'w') as f:%0D%0A f.write(
'left
-
+:
%7B0%7D
+ bit%5Cn
'.fo
@@ -561,26 +561,62 @@
left
-)%0Aprint
+ * 1.44))%0D%0A f.flush()%0D%0A f.write(
'right
-
+:
%7B0%7D
+ bit%5Cn
'.fo
@@ -629,24 +629,60 @@
ight
-)%0Aprint
+ * 1.44))%0D%0A f.flush()%0D%0A f.write(
'all
-
+:
%7B0%7D
+ bit%5Cn
'.fo
@@ -682,24 +682,25 @@
t%5Cn'.format(
+(
left + right
@@ -700,9 +700,49 @@
+ right)
+ * 1.44))%0D%0A f.flush()%0D%0A f.close()%0D
%0A
|
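
Besides converting line endings to CRLF and redirecting output to w1_integrate_result.txt, the substantive change multiplies each integral by 1.44: `integrate(-f(x)*log(f(x)), ...)` with sympy's natural log yields differential entropy in nats, and converting to bits divides by ln 2, i.e. multiplies by log2(e) ≈ 1.4427, which the commit rounds to 1.44. A sketch of the same computation with the exact factor:

from sympy import Rational, integrate, log, symbols

x = symbols('x')
# Triangle density: f = 16(x - 1/4) on [1/4, 1/2], f = -16(x - 3/4) on [1/2, 3/4].
left = integrate(-16 * (x - Rational(1, 4)) * log(16 * (x - Rational(1, 4))),
                 (x, Rational(1, 4), Rational(1, 2)))
right = integrate(16 * (x - Rational(3, 4)) * log(-16 * (x - Rational(3, 4))),
                  (x, Rational(1, 2), Rational(3, 4)))
entropy_nats = left + right
entropy_bits = entropy_nats / log(2)   # exact; 1/ln(2) is what 1.44 approximates
print(entropy_bits.evalf())
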
c3b9d629710bd5b106e33c921a0aa23dd435ce56
|
Exclude package-index on non-deploy
|
lib/python2.7/site-packages/autobuilder/buildsteps/BuildImages.py
|
lib/python2.7/site-packages/autobuilder/buildsteps/BuildImages.py
|
'''
Created on Jan 6, 2013
__author__ = "Elizabeth 'pidge' Flanagan"
__copyright__ = "Copyright 2012-2013, Intel Corp."
__credits__ = ["Elizabeth Flanagan"]
__license__ = "GPL"
__version__ = "2.0"
__maintainer__ = "Elizabeth Flanagan"
__email__ = "[email protected]"
'''
from buildbot.steps.shell import ShellCommand
from buildbot.process.buildstep import LogLineObserver
from distutils.version import StrictVersion
from buildbot.status.results import SUCCESS
import os
class BuildImages(ShellCommand):
haltOnFailure = False
flunkOnFailure = True
name = "BuildImages"
def __init__(self, factory, argdict=None, **kwargs):
self.layerversion_yoctobsp=None
self.machine=""
self.images=""
self._pendingLogObservers = []
self.factory = factory
for k, v in argdict.iteritems():
setattr(self, k, v)
# Timeout needs to be passed to LoggingBuildStep as a kwarg
self.timeout = 100000
kwargs['timeout']=self.timeout
ShellCommand.__init__(self, **kwargs)
def start(self):
self.layerversion_yoctobsp = self.getProperty("layerversion_yoctobsp")
self.layerversion_core = self.getProperty("layerversion_core")
self.machine = self.getProperty("MACHINE")
self.minnowExists = self.getProperty("minnowExists")
if self.images == "#TOASTER":
bitbakeflags = "-k -w 'bitbake_eventlog.json' "
self.images=self.getProperty("custom_images")
self.command = ". ./oe-init-build-env; bitbake " + bitbakeflags + self.images
self.description = ["Building " + str(self.images)]
# core-image-basic rename
# See: http://git.yoctoproject.org/cgit/cgit.cgi/poky/commit/?id=b7f1cca517bbd4191828c6bae32e0c5041f1ff19
# I hate making people change their configs, so support both.
else:
if self.layerversion_core < "4":
self.images=self.images.replace("core-image-full-cmdline", "core-image-basic")
else:
self.images=self.images.replace("core-image-basic", "core-image-full-cmdline")
if "minnow" in self.machine:
if self.minnowExists is "False":
self.command = "echo 'Minnowboard layer does not build for 1.8'"
self.description = ["Skipping Minnowboard"]
if self.layerversion_yoctobsp is not None \
and int(self.layerversion_yoctobsp) < 2 \
and self.machine is not None \
and self.machine == "genericx86-64":
self.command = "echo 'Skipping Step.'"
else:
bitbakeflags = "-k "
# -w only exists in bitbake 1.25 and newer, use distroversion string and make sure we're on poky >1.7
if self.getProperty('bitbakeversion') and StrictVersion(self.getProperty('bitbakeversion')) >= StrictVersion("1.25"):
bitbakeflags += "-w 'bitbake_eventlog.json' "
if self.minnowExists is None or self.minnowExists == "True":
self.command = ". ./oe-init-build-env; bitbake " + bitbakeflags + self.images
self.description = ["Building " + str(self.images)]
ShellCommand.start(self)
def describe(self, done=False):
description = ShellCommand.describe(self, done)
if self.layerversion_yoctobsp is not None and int(self.layerversion_yoctobsp) < 2 and self.machine is not None and self.machine == "genericx86-64":
description.append("genericx86-64 does not exist in this branch. Skipping")
return description
|
Python
| 0 |
@@ -1338,24 +1338,166 @@
nowExists%22)%0A
+ try:%0A self.deploycheck = self.getProperty('custom_deploy_artifacts')%0A except:%0A self.deploycheck = %22True%22%0A
if s
@@ -1793,16 +1793,255 @@
images)%5D
+%0A elif self.images == %22package-index%22 and str(self.deploycheck) == %22False%22:%0A self.command = %22echo 'Skipping build as the required artifacts are not there.'%22%0A self.description = %5B%22Skipping due to Non-Published%22%5D
%0A%0A
|
f2bcbddab48eff06df78faff1ebb47c28adb4e0d
|
fix schema test
|
altair/tests/test_schema.py
|
altair/tests/test_schema.py
|
from altair.schema import SCHEMA
def test_schema():
assert SCHEMA["$schema"]=="http://json-schema.org/draft-04/schema#"
|
Python
| 0.000001 |
@@ -19,22 +19,27 @@
import
-SCHEMA
+load_schema
%0A%0Adef te
@@ -51,16 +51,43 @@
hema():%0A
+ schema = load_schema()%0A
asse
@@ -93,14 +93,14 @@
ert
-SCHEMA
+schema
%5B%22$s
|
48f4c8dba40cb2fe03a74a7a4d7d979892601ddc
|
use __file__ to determine library path
|
tests/context.py
|
tests/context.py
|
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.abspath('..'))
import sample
|
Python
| 0.000002 |
@@ -78,14 +78,55 @@
ath(
+os.path.join(os.path.dirname(__file__),
'..'))
+)
%0A%0Aim
@@ -136,8 +136,9 @@
t sample
+%0A
|
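
Decoded, the change anchors the import path on the test module itself instead of on the process's working directory, so the tests find the package no matter where they are launched from. The resulting context.py, reflowed for width:

# -*- coding: utf-8 -*-

import sys
import os

# Resolve the project root relative to this file, not to the current
# working directory.
sys.path.insert(
    0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

import sample  # the package under test
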
24b2dd2f84a2a9ece9a9a4f7898c6f29233c19bc
|
Add message to welcome accepted students.
|
app/soc/modules/gsoc/models/program.py
|
app/soc/modules/gsoc/models/program.py
|
# Copyright 2009 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains the GSoC specific Program Model.
"""
from google.appengine.ext import db
from django.utils.translation import ugettext
from soc.models import program
class GSoCProgramMessages(program.ProgramMessages):
"""The GSoCProgramMessages model.
"""
#: Message sent to the students that are accepted for the program.
accepted_students_msg = db.TextProperty(required=False,
verbose_name=ugettext('Accepted Students Message'))
#: Message sent to the students that are rejected for the program.
rejected_students_msg = db.TextProperty(required=False,
verbose_name=ugettext('Rejected Students Message'))
class GSoCProgram(program.Program):
"""GSoC Program model extends the basic Program model.
"""
_messages_model = GSoCProgramMessages
homepage_url_name = "gsoc_homepage"
#: Required field storing application limit of the program.
apps_tasks_limit = db.IntegerProperty(required=True,
verbose_name=ugettext('Application/Tasks Limit'))
apps_tasks_limit.group = program.GENERAL_INFO_GROUP
apps_tasks_limit.help_text = ugettext(
'<small><i>e.g.</i></small> '
'<tt><b>20</b> is the student applications limit for <i>Google Summer '
'of Code</i>.</tt>')
#: Optional field storing minimum slots per organization
min_slots = db.IntegerProperty(required=False, default=1,
verbose_name=ugettext('Min slots per org'))
min_slots.group = program.GENERAL_INFO_GROUP
min_slots.help_text = ugettext(
'The amount of slots each org should get at the very least.')
#: Optional field storing maximum slots per organization
max_slots = db.IntegerProperty(required=False, default=50,
verbose_name=ugettext('Max slots per org'))
max_slots.group = program.GENERAL_INFO_GROUP
max_slots.help_text = ugettext(
'The amount of slots each organization should get at most.')
#: Required field storing slots limit of the program.
slots = db.IntegerProperty(required=True,
verbose_name=ugettext('Slots'))
slots.group = program.GENERAL_INFO_GROUP
slots.help_text = ugettext(
'<small><i>e.g.</i></small> '
'<tt><b>500</b> might be an amount of slots for <i>Google Summer '
'of Code</i>, which indicates how many students can be accepted.</tt>')
#: Optional field storing the allocation of slots for this program
slots_allocation = db.TextProperty(required=False,
verbose_name=ugettext('the allocation of slots.'))
#: Whether the slots allocations are visible
allocations_visible = db.BooleanProperty(default=False,
verbose_name=ugettext('Slot allocations visible'))
allocations_visible.group = program.GENERAL_INFO_GROUP
allocations_visible.help_text = ugettext(
'Field used to indicate if the slot allocations should be visible.')
#: Whether the duplicates are visible
duplicates_visible = db.BooleanProperty(default=False,
verbose_name=ugettext('Duplicate proposals visible'))
duplicates_visible.group = program.GENERAL_INFO_GROUP
duplicates_visible.help_text = ugettext(
'Field used to indicate if duplicate proposals should be made visible '
'to org admins.')
|
Python
| 0 |
@@ -1033,24 +1033,319 @@
Message'))%0A%0A
+ #: Message sent to welcome accepted students to the program. This does%0A #: not include any personalized text from the organization they got%0A #: accepted for.%0A accepted_students_welcome_msg = db.TextProperty(required=False,%0A verbose_name=ugettext('Accepted Students Welcome Message'))%0A%0A
#: Message
|
3c3013b8e7de5e1f8ae57e1d4a8b672cab8f6c47
|
Test helpers : Message box, click yes vs enter
|
tests/helpers.py
|
tests/helpers.py
|
from PyQt5.QtWidgets import QApplication, QMessageBox, QDialog, QFileDialog
from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
def click_on_top_message_box():
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QMessageBox):
QTest.keyClick(w, Qt.Key_Enter)
elif isinstance(w, QDialog) and w.windowTitle() == "Registration":
QTest.keyClick(w, Qt.Key_Enter)
def select_file_dialog(filename):
topWidgets = QApplication.topLevelWidgets()
for w in topWidgets:
if isinstance(w, QFileDialog) and w.isVisible():
w.hide()
w.selectFile(filename)
w.show()
w.accept()
|
Python
| 0 |
@@ -296,32 +296,59 @@
est.
-key
+mouse
Click(w
-, Qt.Key_Enter
+.button(QMessageBox.Yes), Qt.LeftButton
)%0A
|
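
Decoded, the change replaces `QTest.keyClick(w, Qt.Key_Enter)` with a mouse click on the message box's concrete Yes button. The distinction matters: Enter activates whichever button currently holds default focus, so a dialog whose default is No would take the wrong branch. A small sketch of the robust form:

from PyQt5.QtCore import Qt
from PyQt5.QtTest import QTest
from PyQt5.QtWidgets import QMessageBox

def click_yes(box):
    """Click the concrete Yes button instead of sending Enter."""
    yes_button = box.button(QMessageBox.Yes)   # None if no Yes button exists
    if yes_button is not None:
        QTest.mouseClick(yes_button, Qt.LeftButton)
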
9fb370d9cd24e2ebe5ec97b01b2d0b78f71d6e2d
|
Add legacy urls for org homepage
|
app/soc/modules/gsoc/views/org_home.py
|
app/soc/modules/gsoc/views/org_home.py
|
#!/usr/bin/env python2.5
#
# Copyright 2011 the Melange authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module containing the views for GSoC Homepage Application.
"""
__authors__ = [
'"Madhusudan.C.S" <[email protected]>',
]
from google.appengine.api import users
from django.conf.urls.defaults import url
from django.core.urlresolvers import reverse
from soc.logic import dicts
from soc.logic.exceptions import AccessViolation
from soc.logic.helper import timeline as timeline_helper
from soc.views.template import Template
from soc.views.helper.access_checker import isSet
from soc.modules.gsoc.logic.models.timeline import logic as timeline_logic
from soc.modules.gsoc.logic.models.student_project import logic as sp_logic
from soc.modules.gsoc.views.base import RequestHandler
from soc.modules.gsoc.views.helper import lists
from soc.modules.gsoc.views.helper import url_patterns
class Apply(Template):
"""Apply template.
"""
def __init__(self, data, current_timeline):
self.data = data
self.current_timeline = current_timeline
def context(self):
organization = self.data.organization
context = {
'request_data': self.data,
'current_timeline': self.current_timeline,
'organization': organization,
}
context['apply_block'] = True
if not self.data.profile:
kwargs = dicts.filter(self.data.kwargs, ['sponsor', 'program'])
suffix = '?org=' + self.data.organization.link_id
if self.data.timeline.studentSignup():
kwargs['role'] = 'student'
context['student_profile_link'] = reverse('create_gsoc_profile',
kwargs=kwargs) + suffix
kwargs['role'] = 'mentor'
context['mentor_profile_link'] = reverse('create_gsoc_profile',
kwargs=kwargs) + suffix
else:
kwargs_org = dicts.filter(self.data.kwargs,
['sponsor', 'program', 'organization'])
if self.data.student_info:
context['submit_proposal_link'] = reverse('submit_gsoc_proposal',
kwargs=kwargs_org)
elif organization not in self.data.mentor_for:
context['mentor_request_link'] = reverse('gsoc_request',
kwargs=kwargs_org)
else:
context['apply_block'] = False
return context
def templatePath(self):
return "v2/modules/gsoc/org_home/_apply.html"
class Contact(Template):
"""Organization Contact template.
"""
def __init__(self, data):
self.data = data
def context(self):
return {
'facebook_link': self.data.organization.facebook,
'twitter_link': self.data.organization.twitter,
'blogger_link': self.data.organization.blog,
'pub_mailing_list_link': self.data.organization.pub_mailing_list,
'irc_channel_link': self.data.organization.irc_channel,
}
def templatePath(self):
return "v2/modules/gsoc/_connect_with_us.html"
class ProjectList(Template):
"""Template for list of student projects accepted under the organization.
"""
def __init__(self, request, data):
self.request = request
self.data = data
list_config = lists.ListConfiguration()
list_config.addColumn('student', 'Student',
lambda entity, *args: entity.student.user.name)
list_config.addSimpleColumn('title', 'Title')
self._list_config = list_config
def context(self):
list = lists.ListConfigurationResponse(
self._list_config, idx=0,
description='List of projects accepted into %s' % (
self.data.organization.name))
return {
'lists': [list],
}
def getListData(self):
"""Returns the list data as requested by the current request.
        If the list as requested is not supported by this component, None
        is returned.
"""
idx = lists.getListIndex(self.request)
if idx == 0:
fields = {'scope': self.data.organization,
'status': 'accepted'}
response_builder = lists.QueryContentResponseBuilder(
self.request, self._list_config, sp_logic,
fields, prefetch=['student'])
return response_builder.build()
else:
return None
def templatePath(self):
return "v2/modules/gsoc/org_home/_project_list.html"
class OrgHome(RequestHandler):
"""View methods for Organization Home page.
"""
def templatePath(self):
return 'v2/modules/gsoc/org_home/base.html'
def djangoURLPatterns(self):
"""Returns the list of tuples for containing URL to view method mapping.
"""
return [
url(r'^gsoc/org/%s$' % url_patterns.ORG, self,
name='gsoc_org_home')
]
def checkAccess(self):
"""Access checks for GSoC Organization Application.
"""
self.mutator.organizationFromKwargs()
def jsonContext(self):
"""Handler for JSON requests.
"""
assert isSet(self.data.organization)
list_content = ProjectList(self.request, self.data).getListData()
if not list_content:
raise AccessViolation(
'You do not have access to this data')
return list_content.content()
def context(self):
"""Handler to for GSoC Organization Home page HTTP get request.
"""
current_timeline = timeline_logic.getCurrentTimeline(
self.data.program_timeline, self.data.org_app)
assert isSet(self.data.organization)
organization = self.data.organization
context = {
'page_name': '%s - Homepage' % organization.short_name,
'organization': organization,
'contact': Contact(self.data),
'tags': organization.tags_string(organization.org_tag),
}
if self.data.adminFor(organization):
self.redirect.organization(organization)
context['edit_link'] = self.redirect.urlOf('edit_gsoc_org_profile')
# Render the apply template only when the user is not logged in
# or has no role for the organization
if (not self.data.user) or not self.data.mentorFor(organization):
context['apply'] = Apply(self.data, current_timeline)
if timeline_helper.isAfterEvent(
self.data.program_timeline, 'accepted_students_announced_deadline'):
context['project_list'] = ProjectList(self.request, self.data)
return context
|
Python
| 0.000001 |
@@ -5271,24 +5271,141 @@
_org_home')%0A
+ url(r'%5Egsoc/org/show/%25s$' %25 url_patterns.ORG, self),%0A url(r'%5Eorg/show/%25s$' %25 url_patterns.ORG, self),%0A
%5D%0A%0A def
|
1ab939ed7da45e7f6ff113b7e71017b28ee877a2
|
Use 'with' keyword while opening file in tests/helpers.py
|
tests/helpers.py
|
tests/helpers.py
|
import razorpay
import os
import unittest
def mock_file(filename):
if not filename:
return ''
file_dir = os.path.dirname(__file__)
file_path = "{}/mocks/{}.json".format(file_dir, filename)
return open(file_path).read()
class ClientTestCase(unittest.TestCase):
def setUp(self):
self.base_url = 'https://api.razorpay.com/v1'
self.secondary_url = 'https://test-api.razorpay.com/v1'
self.payment_id = 'fake_payment_id'
self.refund_id = 'fake_refund_id'
self.card_id = 'fake_card_id'
self.customer_id = 'fake_customer_id'
self.token_id = 'fake_token_id'
self.addon_id = 'fake_addon_id'
self.subscription_id = 'fake_subscription_id'
self.plan_id = 'fake_plan_id'
self.settlement_id = 'fake_settlement_id'
self.client = razorpay.Client(auth=('key_id', 'key_secret'))
self.secondary_client = razorpay.Client(auth=('key_id', 'key_secret'),
base_url=self.secondary_url)
|
Python
| 0.000002 |
@@ -208,22 +208,20 @@
me)%0A
-return
+with
open(fi
@@ -232,15 +232,74 @@
ath)
-.read()
+ as f:%0A mock_file_data = f.read()%0A return mock_file_data
%0A%0A%0Ac
|
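
The decoded replacement wraps the read in a `with` block, so the handle is closed deterministically even when `read()` raises, rather than whenever the garbage collector gets to the anonymous file object left by `open(file_path).read()`. An equivalent, slightly compressed form; returning from inside the block still closes the file on the way out:

import os

def mock_file(filename):
    if not filename:
        return ''
    file_dir = os.path.dirname(__file__)
    file_path = "{}/mocks/{}.json".format(file_dir, filename)
    with open(file_path) as f:   # closed on return and on error alike
        return f.read()
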
9f069cf4fe634f34ccda29c18c03c63db04fe199
|
Update Funcaptcha example
|
examples/funcaptcha.py
|
examples/funcaptcha.py
|
import requests
from os import environ
import re
from random import choice
from python_anticaptcha import AnticaptchaClient, FunCaptchaTask, Proxy
api_key = environ['KEY']
site_key_pattern = 'data-pkey="(.+?)"'
url = 'https://www.funcaptcha.com/demo/'
client = AnticaptchaClient(api_key)
session = requests.Session()
UA = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 ' \
'(KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'
session.headers = {'User-Agent': UA}
proxy_urls = environ['PROXY_URL'].split(',')
def get_form_html():
return session.get(url).text
def get_token(form_html):
proxy_url = choice(proxy_urls)
proxy = Proxy.parse_url(proxy_url)
site_key = re.search(site_key_pattern, form_html).group(1)
task = FunCaptchaTask(url, site_key, proxy=proxy, user_agent=UA)
job = client.createTask(task)
job.join(maximum_time=10**4)
return job.get_token_response()
def process():
html = get_form_html()
return get_token(html)
if __name__ == '__main__':
print(process())
|
Python
| 0 |
@@ -1,16 +1,51 @@
+from urllib.parse import urlparse%0A%0A
import requests%0A
@@ -172,15 +172,8 @@
Task
-, Proxy
%0A%0Aap
@@ -559,24 +559,277 @@
plit(',')%0A%0A%0A
+def parse_url(url):%0A parsed = urlparse(url)%0A return dict(%0A proxy_type=parsed.scheme,%0A proxy_address=parsed.hostname,%0A proxy_port=parsed.port,%0A proxy_login=parsed.username,%0A proxy_password=parsed.password%0A )%0A%0A%0A
def get_form
@@ -933,16 +933,16 @@
y_urls)%0A
+
prox
@@ -949,14 +949,8 @@
y =
-Proxy.
pars
|
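
The commit drops the library's `Proxy` helper and decodes the proxy URL by hand with `urllib.parse.urlparse`, mapping each URL component onto a key of the dict that is later handed to `FunCaptchaTask` as `proxy=`. The decoded function, plus a usage check:

from urllib.parse import urlparse

def parse_url(url):
    """Decode a proxy URL into the dict passed to FunCaptchaTask as proxy=."""
    parsed = urlparse(url)
    return dict(
        proxy_type=parsed.scheme,
        proxy_address=parsed.hostname,
        proxy_port=parsed.port,
        proxy_login=parsed.username,
        proxy_password=parsed.password,
    )

assert parse_url('http://user:[email protected]:8080') == dict(
    proxy_type='http', proxy_address='proxy.example.com', proxy_port=8080,
    proxy_login='user', proxy_password='secret')
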
d3583108eca98f72b9b4898a5cc5e9cf1cacf251
|
Fix log_invocation test on python2 with hash randomization
|
test/units/module_utils/basic/test__log_invocation.py
|
test/units/module_utils/basic/test__log_invocation.py
|
# -*- coding: utf-8 -*-
# (c) 2016, James Cammarata <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division)
__metaclass__ = type
import json
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import MagicMock
class TestModuleUtilsBasic(unittest.TestCase):
@unittest.skip("Skipping due to unknown reason. See #15105")
def test_module_utils_basic__log_invocation(self):
from ansible.module_utils import basic
# test basic log invocation
basic.MODULE_COMPLEX_ARGS = json.dumps(dict(foo=False, bar=[1,2,3], bam="bam", baz=u'baz'))
am = basic.AnsibleModule(
argument_spec=dict(
foo = dict(default=True, type='bool'),
bar = dict(default=[], type='list'),
bam = dict(default="bam"),
baz = dict(default=u"baz"),
password = dict(default=True),
no_log = dict(default="you shouldn't see me", no_log=True),
),
)
am.log = MagicMock()
am._log_invocation()
am.log.assert_called_with(
'Invoked with bam=bam bar=[1, 2, 3] foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD ',
log_args={
'foo': 'False',
'bar': '[1, 2, 3]',
'bam': 'bam',
'baz': 'baz',
'password': 'NOT_LOGGING_PASSWORD',
'no_log': 'NOT_LOGGING_PARAMETER',
},
)
|
Python
| 0.000002 |
@@ -820,16 +820,27 @@
= type%0A%0A
+import sys%0A
import j
@@ -843,16 +843,16 @@
rt json%0A
-
%0Afrom an
@@ -982,17 +982,16 @@
tCase):%0A
-%0A
@uni
@@ -1004,52 +1004,80 @@
skip
-(%22Skipping due to unknown reason. See #15105
+If(sys.version_info%5B0%5D %3E= 3, %22Python 3 is not supported on targets (yet)
%22)%0A
@@ -1782,24 +1782,25 @@
ation()%0A
+%0A
am.log.a
@@ -1795,15 +1795,133 @@
-am.log.
+# Message is generated from a dict so it will be in an unknown order.%0A # have to check this manually rather than with
asse
@@ -1935,16 +1935,17 @@
ed_with(
+)
%0A
@@ -1949,140 +1949,723 @@
- 'Invoked with bam=bam bar=%5B1, 2, 3%5D foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD ',%0A
+args = am.log.call_args%5B0%5D%0A self.assertEqual(len(args), 1)%0A message = args%5B0%5D%0A%0A self.assertEqual(len(message), len('Invoked with bam=bam bar=%5B1, 2, 3%5D foo=False baz=baz no_log=NOT_LOGGING_PARAMETER password=NOT_LOGGING_PASSWORD'))%0A self.assertTrue(message.startswith('Invoked with '))%0A self.assertIn(' bam=bam', message)%0A self.assertIn(' bar=%5B1, 2, 3%5D', message)%0A self.assertIn(' foo=False', message)%0A self.assertIn(' baz=baz', message)%0A self.assertIn(' no_log=NOT_LOGGING_PARAMETER', message)%0A self.assertIn(' password=NOT_LOGGING_PASSWORD', message)%0A%0A kwargs = am.log.call_args%5B1%5D%0A self.assertEqual(kwargs, %0A dict(
log_
@@ -2671,16 +2671,20 @@
_args=%7B%0A
+
@@ -2723,16 +2723,20 @@
+
'bar': '
@@ -2763,16 +2763,20 @@
+
+
'bam': '
@@ -2781,16 +2781,20 @@
'bam',%0A
+
@@ -2831,16 +2831,20 @@
+
+
'passwor
@@ -2887,16 +2887,20 @@
+
+
'no_log'
@@ -2942,11 +2942,19 @@
-%7D,%0A
+ %7D)%0A
|
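
The broken assertion compared a log line built by iterating a dict against a single fixed string; under Python 2 hash randomization the key order, and therefore the string, differs between runs. The fix asserts the prefix, each fragment, and the total length separately. A self-contained sketch of that order-independent style:

import unittest

def build_message(params):
    # Dict iteration order is not guaranteed across runs on Python 2
    # with hash randomization enabled.
    return 'Invoked with ' + ' '.join(
        '%s=%s' % (k, v) for k, v in params.items())

class TestMessage(unittest.TestCase):
    def test_message_contents_order_independently(self):
        message = build_message({'foo': 'False', 'bar': '[1, 2, 3]'})
        self.assertTrue(message.startswith('Invoked with '))
        self.assertIn(' foo=False', message)
        self.assertIn(' bar=[1, 2, 3]', message)
        # The length check guards against duplicated or missing fragments.
        self.assertEqual(len(message),
                         len('Invoked with foo=False bar=[1, 2, 3]'))

if __name__ == '__main__':
    unittest.main()
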
d2fb1f22be6c6434873f2bcafb6b8a9b714acde9
|
Use fail signal in fail_archive_on_error decorator
|
website/archiver/decorators.py
|
website/archiver/decorators.py
|
import functools
from framework.exceptions import HTTPError
from website.project.decorators import _inject_nodes
from website.archiver import ARCHIVER_UNCAUGHT_ERROR
from website.archiver import utils
def fail_archive_on_error(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
try:
return func(*args, **kwargs)
except HTTPError as e:
_inject_nodes(kwargs)
registration = kwargs['node']
utils.handle_archive_fail(
ARCHIVER_UNCAUGHT_ERROR,
registration.registered_from,
registration,
registration.registered_user,
str(e)
)
return wrapped
|
Python
| 0.000001 |
@@ -190,19 +190,21 @@
import
-uti
+signa
ls%0A%0Adef
@@ -471,21 +471,21 @@
-utils.handle_
+signals.send.
arch
@@ -514,153 +514,61 @@
-ARCHIVER_UNCAUGHT_ERROR,%0A registration.registered_from,%0A registration,%0A registration.registered_user
+registration,%0A ARCHIVER_UNCAUGHT_ERROR
,%0A
@@ -585,14 +585,16 @@
+%5B
str(e)
+%5D
%0A
|
3caa77b0f4b43e274eba21a8d759335f7833b99d
|
Change OSF_COOKIE_DOMAIN to None in local-dist.py
|
website/settings/local-dist.py
|
website/settings/local-dist.py
|
# -*- coding: utf-8 -*-
'''Example settings/local.py file.
These settings override what's in website/settings/defaults.py
NOTE: local.py will not be added to source control.
'''
from . import defaults
DEV_MODE = True
DEBUG_MODE = True # Sets app to debug mode, turns off template caching, etc.
SEARCH_ENGINE = 'elastic'
ELASTIC_TIMEOUT = 10
# Comment out to use SHARE in development
USE_SHARE = False
# Comment out to use celery in development
USE_CELERY = False
# Comment out to use GnuPG in development
USE_GNUPG = False # Changing this may require you to re-enter encrypted fields
# Email
USE_EMAIL = False
MAIL_SERVER = 'localhost:1025' # For local testing
MAIL_USERNAME = 'osf-smtp'
MAIL_PASSWORD = 'CHANGEME'
# Mailchimp email subscriptions
ENABLE_EMAIL_SUBSCRIPTIONS = False
# Session
OSF_COOKIE_DOMAIN = '.localhost'
COOKIE_NAME = 'osf'
SECRET_KEY = "CHANGEME"
# Uncomment if GPG was installed with homebrew
# GNUPG_BINARY = '/usr/local/bin/gpg'
##### Celery #####
## Default RabbitMQ broker
BROKER_URL = 'amqp://'
# Default RabbitMQ backend
CELERY_RESULT_BACKEND = 'amqp://'
USE_CDN_FOR_CLIENT_LIBS = False
# Example of extending default settings
# defaults.IMG_FMTS += ["pdf"]
|
Python
| 0 |
@@ -822,20 +822,12 @@
N =
-'.localhost'
+None
%0ACOO
|
22ae3a2e9a236de61c078d234d920a3e6bc62d7b
|
Add a bit of docs
|
pylisp/application/lispd/address_tree/ddt_container_node.py
|
pylisp/application/lispd/address_tree/ddt_container_node.py
|
'''
Created on 1 jun. 2013
@author: sander
'''
from .container_node import ContainerNode
class DDTContainerNode(ContainerNode):
pass
|
Python
| 0.000004 |
@@ -132,9 +132,110 @@
-pass
+'''%0A A ContainerNode that indicates that we are responsible for this part of%0A the DDT tree.%0A '''
%0A
|
1d5285f08bc824ef1a83c2491c75b63df8c63da7
|
allow qualification document uploads only in pending
|
openprocurement/tender/openeu/views/qualification_document.py
|
openprocurement/tender/openeu/views/qualification_document.py
|
# -*- coding: utf-8 -*-
from logging import getLogger
from openprocurement.api.utils import (
get_file,
save_tender,
upload_file,
apply_patch,
update_file_content_type,
json_view,
context_unpack,
)
from openprocurement.api.validation import (
validate_file_update,
validate_file_upload,
validate_patch_document_data,
)
from openprocurement.tender.openeu.utils import qualifications_resource
LOGGER = getLogger(__name__)
@qualifications_resource(
name='TenderEU Qualification Documents',
collection_path='/tenders/{tender_id}/qualifications/{qualification_id}/documents',
path='/tenders/{tender_id}/qualifications/{qualification_id}/documents/{document_id}',
procurementMethodType='aboveThresholdEU',
description="Tender qualification documents")
class TenderQualificationDocumentResource(object):
def __init__(self, request, context):
self.context = context
self.request = request
self.db = request.registry.db
@json_view(permission='view_tender')
def collection_get(self):
"""Tender Qualification Documents List"""
if self.request.params.get('all', ''):
collection_data = [i.serialize("view") for i in self.context.documents]
else:
collection_data = sorted(dict([
(i.id, i.serialize("view"))
for i in self.context.documents
]).values(), key=lambda i: i['dateModified'])
return {'data': collection_data}
@json_view(permission='edit_tender', validators=(validate_file_upload,))
def collection_post(self):
"""Tender Qualification Document Upload
"""
if self.request.validated['tender_status'] != 'active.pre-qualification':
self.request.errors.add('body', 'data', 'Can\'t add document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
qualification = self.request.validated['qualification']
if qualification.status not in ['pending', 'active']:
self.request.errors.add('body', 'data', 'Can\'t add document in current qualification status')
self.request.errors.status = 403
return
document = upload_file(self.request)
self.context.documents.append(document)
if save_tender(self.request):
LOGGER.info('Created tender qualification document {}'.format(document.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_qualification_document_create'}, {'document_id': document.id}))
self.request.response.status = 201
document_route = self.request.matched_route.name.replace("collection_", "")
self.request.response.headers['Location'] = self.request.current_route_url(_route_name=document_route, document_id=document.id, _query={})
return {'data': document.serialize("view")}
@json_view(permission='view_tender')
def get(self):
"""Tender Qualification Document Read"""
if self.request.params.get('download'):
return get_file(self.request)
document = self.request.validated['document']
document_data = document.serialize("view")
document_data['previousVersions'] = [
i.serialize("view")
for i in self.request.validated['documents']
if i.url != document.url
]
return {'data': document_data}
@json_view(validators=(validate_file_update,), permission='edit_tender')
def put(self):
"""Tender Qualification Document Update"""
if self.request.validated['tender_status'] != 'active.pre-qualification':
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
qualification = self.request.validated['qualification']
if qualification.status not in ['pending', 'active']:
self.request.errors.add('body', 'data', 'Can\'t update document in current qualification status')
self.request.errors.status = 403
return
document = upload_file(self.request)
self.request.validated['qualification'].documents.append(document)
if save_tender(self.request):
LOGGER.info('Updated tender qualification document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_qualification_document_put'}))
return {'data': document.serialize("view")}
@json_view(content_type="application/json", validators=(validate_patch_document_data,), permission='edit_tender')
def patch(self):
"""Tender Qualification Document Update"""
if self.request.validated['tender_status'] != 'active.pre-qualification':
self.request.errors.add('body', 'data', 'Can\'t update document in current ({}) tender status'.format(self.request.validated['tender_status']))
self.request.errors.status = 403
return
tender = self.request.validated['tender']
qualification = self.request.validated['qualification']
if qualification.status not in ['pending', 'active']:
self.request.errors.add('body', 'data', 'Can\'t update document in current qualification status')
self.request.errors.status = 403
return
if apply_patch(self.request, src=self.request.context.serialize()):
update_file_content_type(self.request)
LOGGER.info('Updated tender qualification document {}'.format(self.request.context.id),
extra=context_unpack(self.request, {'MESSAGE_ID': 'tender_qualification_document_patch'}))
return {'data': self.request.context.serialize("view")}
|
Python
| 0 |
@@ -2109,32 +2109,27 @@
tion.status
-not in %5B
+!=
'pending', '
@@ -2121,35 +2121,24 @@
!= 'pending'
-, 'active'%5D
:%0A
@@ -4123,32 +4123,27 @@
tion.status
-not in %5B
+!=
'pending', '
@@ -4135,35 +4135,24 @@
!= 'pending'
-, 'active'%5D
:%0A
@@ -5385,16 +5385,11 @@
tus
-not in %5B
+!=
'pen
@@ -5397,19 +5397,8 @@
ing'
-, 'active'%5D
:%0A
|
17565e1725e3d5dff6313308cbc24bfcf55bf2f3
|
Load frequency from parameter server
|
thruster_interface/scripts/thruster_interface_node.py
|
thruster_interface/scripts/thruster_interface_node.py
|
#!/usr/bin/env python
import math
import Adafruit_PCA9685
import numpy
import rospy
from vortex_msgs.msg import Float64ArrayStamped, Pwm
from thruster_interface.srv import ThrustersEnable, ThrustersEnableResponse
# Constants
PWM_BITS_PER_PERIOD = 4096.0 # 12 bit PWM
FREQUENCY = rospy.get_param('/pwm/frequency/set')
FREQUENCY_MEASURED = rospy.get_param('/pwm/frequency/measured')
PERIOD_LENGTH_IN_MICROSECONDS = 1000000.0 / FREQUENCY_MEASURED
THRUST_RANGE_LIMIT = 100
LOOKUP_THRUST = rospy.get_param('/thrusters/characteristics/thrust')
LOOKUP_PULSE_WIDTH = rospy.get_param('/thrusters/characteristics/pulse_width')
NUM_THRUSTERS = rospy.get_param('/propulsion/thrusters/num')
MAX_RATE = rospy.get_param('/thrusters/rate_of_change/max')
RATE_LIMITING_ENABLED = rospy.get_param('/thruster_interface/rate_limiting_enabled')
THRUSTERS_CONNECTED = rospy.get_param('/thruster_interface/thrusters_connected')
THRUSTER_PWM_PINS = rospy.get_param('/pwm/pins/thrusters')
class ThrusterInterface(object):
def __init__(self):
rospy.init_node('thruster_interface', anonymous=False)
self.pub = rospy.Publisher('debug/thruster_pwm', Float64ArrayStamped, queue_size=10)
self.pub_pwm = rospy.Publisher('pwm', Pwm, queue_size=10)
self.sub = rospy.Subscriber('thruster_forces', Float64ArrayStamped, self.callback)
self.srv = rospy.Service('/thruster_interface/thrusters_enable', ThrustersEnable, self.handle_thrusters_enable)
self.prev_time = rospy.get_rostime()
self.is_initialized = False
# The setpoint is the desired value (input)
self.thrust_setpoint = numpy.zeros(NUM_THRUSTERS)
# The reference is the output value (rate limited)
self.thrust_reference = numpy.zeros(NUM_THRUSTERS)
self.thrusters_enabled = True
self.output_to_zero()
rospy.on_shutdown(self.output_to_zero)
rospy.loginfo("Launching at %d Hz", FREQUENCY)
def output_to_zero(self):
neutral_pulse_width = self.microsecs_to_bits(self.thrust_to_microsecs(0))
if THRUSTERS_CONNECTED and self.thrusters_enabled:
pwm_msg = Pwm()
for i in range(NUM_THRUSTERS):
pwm_msg.pins.append(THRUSTER_PWM_PINS[i])
pwm_msg.on.append(0)
pwm_msg.off.append(neutral_pulse_width)
self.pub_pwm.publish(pwm_msg)
def callback(self, msg):
if not self.healthy_message(msg):
return
if not self.is_initialized:
self.prev_time = msg.header.stamp
self.is_initialized = True
rospy.loginfo('Successfully initialized.')
return
curr_time = msg.header.stamp
dt = (curr_time - self.prev_time).to_sec()
if (dt <= 0) and RATE_LIMITING_ENABLED:
rospy.logwarn_throttle(10, 'Zero time difference between messages, ignoring...')
return
self.prev_time = curr_time
thrust_setpoint_list = msg.data
self.thrust_setpoint = thrust_setpoint_list
self.update_reference(dt)
self.set_pwm()
def handle_thrusters_enable(self, req):
if req.thrusters_enable:
rospy.loginfo('Enabling thrusters')
self.thrusters_enabled = True
else:
rospy.loginfo('Disabling thrusters')
self.output_to_zero()
self.thrusters_enabled = False
return ThrustersEnableResponse()
def thrust_to_microsecs(self, thrust):
return numpy.interp(thrust, LOOKUP_THRUST, LOOKUP_PULSE_WIDTH)
def microsecs_to_bits(self, microsecs):
duty_cycle_normalized = microsecs / PERIOD_LENGTH_IN_MICROSECONDS
return int(round(PWM_BITS_PER_PERIOD * duty_cycle_normalized))
def update_reference(self, dt):
if RATE_LIMITING_ENABLED:
rate_of_change = (self.thrust_setpoint - self.thrust_reference) / dt
for i in range(NUM_THRUSTERS):
if rate_of_change[i] > MAX_RATE:
self.thrust_reference[i] += dt * MAX_RATE
elif rate_of_change[i] < -MAX_RATE:
self.thrust_reference[i] -= dt * MAX_RATE
else:
self.thrust_reference[i] = self.thrust_setpoint[i]
else:
self.thrust_reference = self.thrust_setpoint
def set_pwm(self):
microsecs = [None] * NUM_THRUSTERS
pwm_msg = Pwm()
for i in range(NUM_THRUSTERS):
microsecs[i] = self.thrust_to_microsecs(self.thrust_reference[i])
pwm_bits = self.microsecs_to_bits(microsecs[i])
pwm_msg.pins.append(THRUSTER_PWM_PINS[i])
pwm_msg.on.append(0)
pwm_msg.off.append(pwm_bits)
if THRUSTERS_CONNECTED and self.thrusters_enabled:
self.pub_pwm.publish(pwm_msg)
# Publish outputs for debug
debug_msg = Float64ArrayStamped()
debug_msg.header.stamp = rospy.get_rostime()
debug_msg.data = microsecs
self.pub.publish(debug_msg)
def healthy_message(self, msg):
if (len(msg.data) != NUM_THRUSTERS):
rospy.logwarn_throttle(10, 'Wrong number of thrusters, ignoring...')
return False
for t in msg.data:
if math.isnan(t) or math.isinf(t) or (abs(t) > THRUST_RANGE_LIMIT):
rospy.logwarn_throttle(10, 'Message out of range, ignoring...')
return False
return True
if __name__ == '__main__':
try:
thruster_interface = ThrusterInterface()
rospy.spin()
except rospy.ROSInterruptException:
pass
|
Python
| 0 |
@@ -32,32 +32,8 @@
ath%0A
-import Adafruit_PCA9685%0A
impo
|
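Decoded, the single hunk of this record deletes the import Adafruit_PCA9685 line; the node never references that driver (it publishes Pwm messages rather than talking to the PCA9685 directly), so after the patch the file header would presumably read:

#!/usr/bin/env python
import math
import numpy
import rospy
from vortex_msgs.msg import Float64ArrayStamped, Pwm
from thruster_interface.srv import ThrustersEnable, ThrustersEnableResponse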
8acaec546de0311f5f33c2e8fb9e1828a1cbc44b
|
Fix memory leak caused by using rabbit as the result backend for celery
|
worker_manager/celeryconfig.py
|
worker_manager/celeryconfig.py
|
"""
Configuration file for celerybeat/worker.
Dynamically adds consumers from all manifest files in worker_manager/manifests/
to the celerybeat schedule. Also adds a heartbeat function to the schedule,
which runs every 30 seconds, and a monthly task to normalize all non-normalized
documents.
"""
from celery.schedules import crontab
from datetime import timedelta
import os
import yaml
BROKER_URL = 'amqp://guest@localhost'
CELERY_RESULT_BACKEND = 'amqp://guest@localhost'
CELERY_TASK_SERIALIZER = 'pickle'
CELERY_RESULT_SERIALIZER = 'pickle'
CELERY_ACCEPT_CONTENT = ['pickle']
CELERY_ENABLE_UTC = True
CELERY_TIMEZONE = 'UTC'
CELERY_IMPORTS = ('worker_manager.celerytasks',)
# Programmatically generate celery beat schedule
SCHED = {}
for manifest in os.listdir('worker_manager/manifests/'):
filepath = 'worker_manager/manifests/' + manifest
with open(filepath) as f:
info = yaml.load(f)
SCHED['run ' + manifest] = {
'task': 'worker_manager.celerytasks.run_consumer',
'schedule': crontab(day_of_week=info['days'], hour=info['hour'], minute=info['minute']),
'args': [filepath],
}
# Deprecated
SCHED['request normalization of recent documents'] = {
'task': 'worker_manager.celerytasks.request_normalized',
'schedule': crontab(minute='*/1')
}
SCHED['check_archive'] = {
'task': 'worker_manager.celerytasks.check_archive',
'schedule': crontab(day_of_month='1', hour='23', minute='59'),
}
SCHED['add'] = {
'task': 'worker_manager.celerytasks.heartbeat',
'schedule': timedelta(seconds=30),
'args': (16, 16)
}
CELERYBEAT_SCHEDULE = SCHED
|
Python
| 0.000004 |
@@ -441,16 +441,18 @@
alhost'%0A
+#
CELERY_R
@@ -1481,19 +1481,25 @@
%0ASCHED%5B'
-add
+heartbeat
'%5D = %7B%0A
|
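Decoding the two hunks: the first comments out the result backend (the amqp backend creates one RabbitMQ queue per task result, a well-known source of unbounded growth, hence the memory-leak subject line), and the second renames the beat entry 'add' to 'heartbeat' to match the task it schedules. A runnable sketch of the patched fragments, with the surrounding config elided:

from datetime import timedelta

# Result backend disabled; only the broker setting is kept.
# CELERY_RESULT_BACKEND = 'amqp://guest@localhost'

SCHED = {}
# Renamed from SCHED['add'] so the key matches the heartbeat task.
SCHED['heartbeat'] = {
    'task': 'worker_manager.celerytasks.heartbeat',
    'schedule': timedelta(seconds=30),
    'args': (16, 16),
}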
cebba3af4f49173a6cf04d1c0824838772cbbf19
|
Refactor code
|
batchflow/models/tf/nn/train.py
|
batchflow/models/tf/nn/train.py
|
""" Helpers for training """
from math import pi
import tensorflow as tf
from tensorflow.python.framework import ops # pylint: disable=no-name-in-module
from tensorflow.math import sin, asin, floor # pylint: disable=import-error
def piecewise_constant(global_step, *args, **kwargs):
""" Constant learning rate decay (uses global_step param instead of x) """
return tf.train.piecewise_constant(global_step, *args, **kwargs)
def cyclic_learning_rate(learning_rate, global_step, max_lr=0.1, step_size=10, mode='triangular', name=None):
""" Applies cyclic learning rate (CLR).
https://arxiv.org/abs/1506.01186.
This function varies the learning rate between the
minimum (learning_rate) and the maximum (max_lr).
It returns the decayed learning rate.
Parameters
----------
learning_rate: float or tf.Tensor
the minimum learning rate boundary
global_step: int or tf.Tensor.
global step to use for the cyclic computation. Must not be negative
max_lr: float
the maximum learning rate boundary (default=0.1)
step_size: int
the number of iterations in half a cycle (default=10)
mode:
If 'sin' or 'sine' or 'sine wave':
Learning rate changes as a sine wave, starting
from (max_lr-learning_rate)/2 then decreasing to `learning_rate`.
It is computed as:
```python
decayed_learning_rate = (max_lr - learning_rate) / 2 *
sin(pi * global_step / step_size) +
(max_lr + learning_rate) / 2
```
If 'triangular' or 'triangular wave' or 'zigzag':
Default, linearly increasing then linearly decreasing the
learning rate at each cycle. Learning rate starting
from (max_lr-learning_rate)/2 then decreasing to `learning_rate`.
It is computed as:
```python
decayed_learning_rate = (max_lr - learning_rate) / pi *
asin(sin(2 * pi / step_size * global_step)) +
(max_lr + learning_rate) / 2
```
If 'sawtooth' or 'saw' or 'sawtooth wave' or 'saw wave':
Learning rate linearly increasing from `learning_rate` to `max_lr`
and then sharply drops to `learning_rate` at each cycle.
Learning rate starting from `learning_rate` then increasing.
It is computed as:
```python
decayed_learning_rate = (max_lr - learning_rate) *
(floor(global_step / step_size) - global_step / step_size) +
learning_rate
```
name: str
Optional name of the operation (default='CyclicLearningRate')
Returns
-------
tf.Tensor
"""
with ops.name_scope_v2(name or "CyclicLearningRate"):
learning_rate = tf.cast(learning_rate, dtype=tf.float32, name="learning_rate")
global_step = tf.cast(global_step, dtype=tf.float32)
step_size = tf.cast(step_size, dtype=tf.float32)
max_lr = tf.cast(max_lr, dtype=tf.float32)
if mode in ('sin', 'sine', 'sine wave'):
first_factor = (learning_rate - max_lr) / 2.
second_factor = sin((pi * global_step)/step_size)
second_comp = (learning_rate + max_lr) / 2.
elif mode in ('triangular', 'triangular wave', 'zigzag'):
first_factor = (learning_rate-max_lr) / pi
inside_sin = 2. * pi / step_size * global_step
second_factor = asin(sin(inside_sin))
second_comp = (learning_rate + max_lr) / 2.
elif mode in ('sawtooth', 'saw', 'sawtooth wave', 'saw wave'):
first_factor = learning_rate - max_lr
divided_global_step = global_step / step_size
second_factor = floor(divided_global_step) - divided_global_step
second_comp = learning_rate
return first_factor * second_factor + second_comp
|
Python
| 0.000002 |
@@ -71,88 +71,8 @@
tf%0A
-from tensorflow.python.framework import ops # pylint: disable=no-name-in-module%0A
from
@@ -2807,19 +2807,18 @@
with
-ops
+tf
.name_sc
@@ -2922,30 +2922,8 @@
at32
-, name=%22learning_rate%22
)%0A
|
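Per the three hunks of this refactor, the private tensorflow.python.framework.ops import is dropped, the scope is opened through the public tf namespace instead, and the redundant name= argument is removed from the first cast. The changed lines, as recorded in the diff, become:

with tf.name_scope_v2(name or "CyclicLearningRate"):
    learning_rate = tf.cast(learning_rate, dtype=tf.float32)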
8cb46b963a20e65a64086a1c3e105dc858f22fb3
|
Adds a blacklist of SKPs to exclude from bench expectations.
|
bench/gen_bench_expectations.py
|
bench/gen_bench_expectations.py
|
#!/usr/bin/env python
# Copyright (c) 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
""" Generate bench_expectations file from a given set of bench data files. """
import argparse
import bench_util
import os
import re
import sys
# Parameters for calculating bench ranges.
RANGE_RATIO_UPPER = 1.2 # Ratio of range for upper bounds.
RANGE_RATIO_LOWER = 1.8 # Ratio of range for lower bounds.
ERR_RATIO = 0.08 # Further widens the range by the ratio of average value.
ERR_ABS = 0.5 # Adds an absolute error margin to cope with very small benches.
# List of bench configs to monitor. Ignore all other configs.
CONFIGS_TO_INCLUDE = ['simple_viewport_1000x1000',
'simple_viewport_1000x1000_gpu',
'simple_viewport_1000x1000_scalar_1.100000',
'simple_viewport_1000x1000_scalar_1.100000_gpu',
]
def compute_ranges(benches):
"""Given a list of bench numbers, calculate the alert range.
Args:
benches: a list of float bench values.
Returns:
a list of float [lower_bound, upper_bound].
"""
minimum = min(benches)
maximum = max(benches)
diff = maximum - minimum
avg = sum(benches) / len(benches)
return [minimum - diff * RANGE_RATIO_LOWER - avg * ERR_RATIO - ERR_ABS,
maximum + diff * RANGE_RATIO_UPPER + avg * ERR_RATIO + ERR_ABS]
def create_expectations_dict(revision_data_points):
"""Convert list of bench data points into a dictionary of expectations data.
Args:
revision_data_points: a list of BenchDataPoint objects.
Returns:
a dictionary of this form:
keys = tuple of (config, bench) strings.
values = list of float [expected, lower_bound, upper_bound] for the key.
"""
bench_dict = {}
for point in revision_data_points:
if (point.time_type or # Not walltime which has time_type ''
not point.config in CONFIGS_TO_INCLUDE):
continue
key = (point.config, point.bench)
if key in bench_dict:
raise Exception('Duplicate bench entry: ' + str(key))
bench_dict[key] = [point.time] + compute_ranges(point.per_iter_time)
return bench_dict
def main():
"""Reads bench data points, then calculate and export expectations.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
'-a', '--representation_alg', default='25th',
help='bench representation algorithm to use, see bench_util.py.')
parser.add_argument(
'-b', '--builder', required=True,
help='name of the builder whose bench ranges we are computing.')
parser.add_argument(
'-d', '--input_dir', required=True,
help='a directory containing bench data files.')
parser.add_argument(
'-o', '--output_file', required=True,
help='file path and name for storing the output bench expectations.')
parser.add_argument(
'-r', '--git_revision', required=True,
help='the git hash to indicate the revision of input data to use.')
args = parser.parse_args()
builder = args.builder
data_points = bench_util.parse_skp_bench_data(
args.input_dir, args.git_revision, args.representation_alg)
expectations_dict = create_expectations_dict(data_points)
out_lines = []
keys = expectations_dict.keys()
keys.sort()
for (config, bench) in keys:
(expected, lower_bound, upper_bound) = expectations_dict[(config, bench)]
out_lines.append('%(bench)s_%(config)s_,%(builder)s-%(representation)s,'
'%(expected)s,%(lower_bound)s,%(upper_bound)s' % {
'bench': bench,
'config': config,
'builder': builder,
'representation': args.representation_alg,
'expected': expected,
'lower_bound': lower_bound,
'upper_bound': upper_bound})
with open(args.output_file, 'w') as file_handle:
file_handle.write('\n'.join(out_lines))
if __name__ == "__main__":
main()
|
Python
| 0.998256 |
@@ -978,16 +978,125 @@
%5D%0A%0A
+# List of flaky SKPs that should be excluded.%0ASKPS_TO_EXCLUDE = %5B'desk_chalkboard.skp',%0A %5D%0A%0A
%0Adef com
@@ -2108,16 +2108,58 @@
_INCLUDE
+ or%0A point.bench in SKPS_TO_EXCLUDE
):%0A
|
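The two hunks here insert a blacklist right after CONFIGS_TO_INCLUDE and extend the skip condition in create_expectations_dict; after the patch those regions would read (indentation reconstructed from the surrounding code):

# List of flaky SKPs that should be excluded.
SKPS_TO_EXCLUDE = ['desk_chalkboard.skp',
                   ]

    if (point.time_type or  # Not walltime which has time_type ''
        not point.config in CONFIGS_TO_INCLUDE or
        point.bench in SKPS_TO_EXCLUDE):
      continue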
16806f7a620ddaba727fc6c7d6387eaa1c17f103
|
Update p4-test-tool.py
|
benchexec/tools/p4-test-tool.py
|
benchexec/tools/p4-test-tool.py
|
# This file is part of BenchExec, a framework for reliable benchmarking:
# https://github.com/sosy-lab/benchexec
#
# SPDX-FileCopyrightText: 2007-2020 Dirk Beyer <https://www.sosy-lab.org>
#
# SPDX-License-Identifier: Apache-2.0
import benchexec.util as util
import benchexec.tools.template
import benchexec.result as result
class Tool(benchexec.tools.template.BaseTool2):
# Needed for benchexec to run, but irrelevant for p4 extension
def executable(self, tool):
return "/"
def name(self):
return "P4 Test"
def determine_result(self, run):
for line in run.output:
if run.cmdline[3] + " ... ok" in line:
return benchexec.result.RESULT_CLASS_TRUE
else:
return benchexec.result.RESULT_CLASS_FALSE
|
Python
| 0.000001 |
@@ -807,20 +807,8 @@
S_FALSE%0A
-
|
b9d30a39f31862af607af44e97878a287f9361c5
|
bump to v0.5.3
|
steam/__init__.py
|
steam/__init__.py
|
__version__ = "0.5.2"
__author__ = "Rossen Georgiev"
from steam.steamid import SteamID
from steam.webapi import WebAPI
|
Python
| 0.000001 |
@@ -16,9 +16,9 @@
0.5.
-2
+3
%22%0A__
|
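This record is small enough to check the hunk format end to end: the header @@ -16,9 +16,9 @@ appears to address 1-based character offsets rather than lines, since characters 16 through 24 of the old file are exactly the context '0.5.', the changed digit, and the encoded tail %22%0A__. A sketch verifying that reading (the file tail is abbreviated here):

import urllib.parse

old = '__version__ = "0.5.2"\n__author__ = "Rossen Georgiev"\n'
# @@ -16,9 +16,9 @@ : nine characters starting at 1-based offset 16
span = old[15:24]
assert span == urllib.parse.unquote('0.5.2%22%0A__')
# Apply the one-character substitution 2 -> 3 from the hunk body.
new = old[:15] + span.replace('2', '3', 1) + old[24:]
assert new.startswith('__version__ = "0.5.3"')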
026ba5fa78cb9916bffc23cf7dda1d1deb81b24c
|
Bump version 1.0.3
|
story/__init__.py
|
story/__init__.py
|
"""
Story - PySchool
"""
__author__ = 'PySchool'
__version__ = '1.0.2'
__licence__ = 'MIT'
|
Python
| 0 |
@@ -66,9 +66,9 @@
1.0.
-2
+3
'%0A__
|
8d0e3ae1f80e8b19292b18a20a338cbfd00364c7
|
Bump to version number 1.6.0
|
stream/release.py
|
stream/release.py
|
# coding=utf-8
"""
stream.release
~~~~~~~~~~~~~~
Include release information of the package.
:copyright: (c) 2016 by Ali Ghaffaari.
:license: MIT, see LICENSE for more details.
"""
# CONSTANTS ###################################################################
# Development statuses:
DS_PLANNING = 1
DS_PREALPHA = 2
DS_ALPHA = 3
DS_BETA = 4
DS_STABLE = 5
DS_MATURE = 6
DS_INACTIVE = 7
DS_STRING = {
DS_PLANNING: 'Development Status :: 1 - Planning',
DS_PREALPHA: 'Development Status :: 2 - Pre-Alpha',
DS_ALPHA: 'Development Status :: 3 - Alpha',
DS_BETA: 'Development Status :: 4 - Beta',
DS_STABLE: 'Development Status :: 5 - Production/Stable',
DS_MATURE: 'Development Status :: 6 - Mature',
DS_INACTIVE: 'Development Status :: 7 - Inactive'
}
###############################################################################
# Package release information.
__title__ = 'stream'
__description__ = 'Python implementation of stream library'
__author__ = 'Ali Ghaffaari'
__email__ = '[email protected]'
__license__ = 'MIT'
# Release
__version__ = '1.5.2'
__status__ = DS_BETA
# PyPI-related information
__keywords__ = 'stream protocol buffer protobuf'
__classifiers__ = [
# Development status
DS_STRING[__status__],
# License
'License :: OSI Approved :: MIT License',
# Supported Python versions.
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
# Intended Audience and Topic
'Intended Audience :: Developers',
]
__requires__ = ['protobuf>=3.4.0', 'async_generator>=1.10', 'click>=6.0.0', 'future']
__tests_require__ = []
__extras_require__ = {
'test': ['nose>=1.0', 'coverage'],
}
__setup_requires__ = ['nose>=1.0', 'coverage']
__entry_points__ = '''
[console_scripts]
varint=stream.varint:cli
'''
|
Python
| 0.000001 |
@@ -1110,11 +1110,11 @@
'1.
-5.2
+6.0
'%0A__
@@ -1127,20 +1127,22 @@
__ = DS_
-BETA
+STABLE
%0A%0A# PyPI
|
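Decoded, the two hunks of this last record bump the version string and promote the development status from beta to stable, so the release block becomes:

# Release
__version__ = '1.6.0'
__status__ = DS_STABLE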