Column schema (name, type, observed length/value range):
  commit        string, 40 characters (fixed)
  subject       string, 1 to 3.25k characters
  old_file      string, 4 to 311 characters
  new_file      string, 4 to 311 characters
  old_contents  string, 0 to 26.3k characters
  lang          string, 3 classes
  proba         float64, 0 to 1
  diff          string, 0 to 7.82k characters
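The diff column in the rows below appears to store unified-diff hunks with percent-encoded control characters (%0A for newlines, %22 for double quotes, %5B/%5D for brackets). A minimal sketch of how such a hunk could be decoded, assuming standard URL-style escaping; the helper name and the sample fragment are illustrative only:

from urllib.parse import unquote

def decode_diff(encoded_diff: str) -> str:
    # Turn a percent-encoded diff hunk back into plain unified-diff text.
    # Assumes standard URL-style escapes (%0A = newline, %22 = double quote).
    return unquote(encoded_diff)

# Illustrative usage with a fragment like the ones in the diff column below.
sample = "@@ -39,17 +39,17 @@ = '3.0. -0 +1 '%0ADESCRI"
print(decode_diff(sample))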
commit: e267bf384a33e17c48101e4726975dae31a88563
subject: Fix docstring for correlations.
old_file: scikits/talkbox/tools/correlations.py
new_file: scikits/talkbox/tools/correlations.py
import numpy as np from scipy.fftpack import fft, ifft __all__ = ['nextpow2', 'acorr'] def nextpow2(n): """Return the next power of 2 such as 2^p >= n. Notes ----- Infinite and nan are left untouched, negative values are not allowed.""" if np.any(n < 0): raise ValueError("n should be > 0") if np.isscalar(n): f, p = np.frexp(n) if f == 0.5: return p-1 elif np.isfinite(f): return p else: return f else: f, p = np.frexp(n) res = f bet = np.isfinite(f) exa = (f == 0.5) res[bet] = p[bet] res[exa] = p[exa] - 1 return res def _acorr_last_axis(x, nfft, maxlag, onesided=False, scale='none'): a = np.real(ifft(np.abs(fft(x, n=nfft) ** 2))) if onesided: b = a[..., :maxlag] else: b = np.concatenate([a[..., nfft-maxlag+1:nfft], a[..., :maxlag]], axis=-1) #print b, a[..., 0][..., np.newaxis], b / a[..., 0][..., np.newaxis] if scale == 'coeff': return b / a[..., 0][..., np.newaxis] else: return b def acorr(x, axis=-1, onesided=False, scale='none'): """Compute autocorrelation of x along given axis. Parameters ---------- x : array-like signal to correlate. axis : int axis along which autocorrelation is computed. onesided: bool, optional if True, only returns the right side of the autocorrelation. scale: {'none', 'coeff'} scaling mode. If 'coeff', the correlation is normalized such as the 0-lag is equal to 1. Notes ----- Use fft for computation: is more efficient than direct computation for relatively large n. """ if not np.isrealobj(x): raise ValueError("Complex input not supported yet") if not scale in ['none', 'coeff']: raise ValueError("scale mode %s not understood" % scale) maxlag = x.shape[axis] nfft = 2 ** nextpow2(2 * maxlag - 1) if axis != -1: x = np.swapaxes(x, -1, axis) a = _acorr_last_axis(x, nfft, maxlag, onesided, scale) if axis != -1: a = np.swapaxes(a, -1, axis) return a
lang: Python
proba: 0
@@ -172,17 +172,16 @@ -----%0A -%0A Infi @@ -1270,28 +1270,24 @@ ----------%0A - x : arra @@ -1293,20 +1293,16 @@ ay-like%0A - @@ -1322,28 +1322,24 @@ relate.%0A - - axis : int%0A @@ -1337,20 +1337,16 @@ s : int%0A - @@ -1391,20 +1391,16 @@ mputed.%0A - ones @@ -1428,20 +1428,16 @@ - - if True, @@ -1489,20 +1489,16 @@ lation.%0A - scal @@ -1526,20 +1526,16 @@ - scaling @@ -1602,20 +1602,16 @@ - - 0-lag is @@ -1644,20 +1644,16 @@ -----%0A - Use @@ -1719,20 +1719,16 @@ ion for%0A - rela
commit: 78d02e170f7f8d2b00a9420998d8ccaa98b67f0f
subject: test commit-msg 3
old_file: versioner.py
new_file: versioner.py
#!/bin/env python import sys from optparse import OptionParser import os import re usage = "usage: %prog [options] file" version = "0.1.8.0" version_text = "%prog {}".format(version) opt = OptionParser(usage = usage, version = version_text) opt.add_option ("-l","--language" ,action = "store" ,dest = "language", default = 0 ,help = "manualy select the language") opt.add_option ("-s","--show" ,action = "store_true" ,dest = "show", default = False ,help = "show the current version of the file") opt.add_option ("","--major" ,action = "store_true" ,dest = "major", default = False ,help = "upgrade major version") opt.add_option ("","--minor" ,action = "store_true" ,dest = "minor", default = False ,help = "upgrade minor version") opt.add_option ("","--maintenance" ,action = "store_true" ,dest = "maintenance", default = False ,help = "upgrade maintenance version") opt.add_option ("","--build" ,action = "store_true" ,dest = "build", default = False ,help = "upgrade build version") opt.add_option ("-e","--no-error" ,action = "store_true" ,dest = "no_error", default = False ,help = "no version is not considered as error") opt.add_option ("-v","--version-only" ,action = "store_true" ,dest = "version_only", default = False ,help = "if showing, show only the current version") (options, args) = opt.parse_args() class Language: Unknown, Python, Haskell, Cpp, Rust = range(0,5) @staticmethod def languages(): l = [] for a in dir(Language): if not "__" in a and not a in ["parse", "languages", "Unknown"]: l.append(a) return ", ".join(l) @staticmethod def parse( text ): text = text.lower() d = { "python" : Language.Python, "haskell" : Language.Haskell, "cpp" : Language.Cpp, "rust" : Language.Rust, } if text in d: return d[text] for k,v in d.iteritems(): if text in k: return v return Language.Unknown try: options.file_path = args[0] except: # try .versionrc file try: with open(".versionrc", "r") as f: m = re.compile("MAIN_VERSION_FILE=(.*)").match(f.read()) if m: options.file_path = m.group(1) else: raise "no file path" except: sys.stderr.write("No input file!\n") exit(2) if not os.path.isfile(options.file_path): sys.stderr.write("{} not exists!\n".format(options.file_path)) exit(3) if options.language: lan = Language.parse(options.language) if lan == Language.Unknown: sys.stderr.write("Incorrect language, available languages: {}\n".format(Language.languages())) exit(1) options.language = lan else: if options.file_path == "Cargo.toml": options.language = Language.Rust else: _, ext = os.path.splitext(options.file_path) exts = { ".py" : Language.Python, ".cabal" : Language.Haskell, ".hpp" : Language.Cpp, ".cpp" : Language.Cpp, } options.language = exts.get(ext, Language.Unknown) if options.language == Language.Unknown: if options.no_error: print("Unknown language, cannot parse the file") exit(0) sys.stderr.write("Unknown language, cannot parse the file\n") exit(4) program_version_re = { Language.Python : re.compile("version\s*=\s*\"(\d+)\.(\d+)\.(\d+).(\d+)\""), Language.Cpp : re.compile("string\s+version\s*=\s*\"(\d+)\.(\d+)\.(\d+).(\d+)\""), Language.Haskell : re.compile("version\s*:\s*(\d+)\.(\d+)\.(\d+).(\d+)"), Language.Rust : re.compile("version\s*=\s*\"(\d+)\.(\d+)\.(\d+)\""), } program_version_update = { Language.Python : "version = \"{}.{}.{}.{}\"", Language.Cpp : "string version = \"{}.{}.{}.{}\"", Language.Haskell : "version: {}.{}.{}.{}", Language.Rust : "version = \"{}.{}.{}\"", } def get_version(options): program_re = program_version_re[options.language] with open(options.file_path,"r") as f: lines = 
f.readlines() for line in lines: m = program_re.match(line) if m and m.groups == 4: return (m.group(0), int(m.group(1)),int(m.group(2)),int(m.group(3)),int(m.group(4))) elif m: return (m.group(0), int(m.group(1)),int(m.group(2)),int(m.group(3)),0) return None current_version = get_version(options) old_version = current_version if not current_version: if options.no_error: exit(0) else: exit(10) upgraded = False if options.major: t,m,_,_,_ = current_version current_version = (t, m + 1, 0, 0, 0) upgraded = True if options.minor: t,m,n,_,_ = current_version current_version = (t, m , n + 1, 0, 0) upgraded = True if options.maintenance: t,m,n,a,_ = current_version current_version = (t, m , n, a + 1, 0) upgraded = True if options.build: t,m,n,a,b = current_version current_version = (t, m , n, a, b + 1) upgraded = True if options.show: _,m,n,a,b = current_version _,om,on,oa,ob = old_version if options.version_only: print("{}.{}.{}.{}".format(m,n,a,b)) else: if upgraded: print ("Version has been upgraded from '{}.{}.{}.{}' to '{}.{}.{}.{}'".format(om,on,oa,ob,m,n,a,b)) else: print ("Current version is '{}.{}.{}.{}'".format(m,n,a,b)) exit(0) orig, major, minor, maintenance, build = current_version updated = program_version_update[options.language].format(major, minor, maintenance, build) text = None with open(options.file_path,"r") as f: text = f.read() text = text.replace(orig, updated) print (text)
lang: Python
proba: 0.000001
@@ -2608,17 +2608,16 @@ read())%0A -%0A
commit: 45918f696ff43815a15640b080b68b007c27b2f8
subject: Clean-up query
old_file: scripts/analytics/preprint_summary.py
new_file: scripts/analytics/preprint_summary.py
import pytz import logging from dateutil.parser import parse from datetime import datetime, timedelta from django.db.models import Q from website.app import init_app from scripts.analytics.base import SummaryAnalytics logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO) LOG_THRESHOLD = 11 class PreprintSummary(SummaryAnalytics): @property def collection_name(self): return 'preprint_summary' def get_events(self, date): super(PreprintSummary, self).get_events(date) from osf.models import PreprintService, PreprintProvider # Convert to a datetime at midnight for queries and the timestamp timestamp_datetime = datetime(date.year, date.month, date.day).replace(tzinfo=pytz.UTC) query_datetime = timestamp_datetime + timedelta(1) counts = [] for preprint_provider in PreprintProvider.objects.all(): preprint_for_provider_count = PreprintService.objects.filter(Q( node__isnull=False,node__is_deleted=False, provider___id=preprint_provider._id, date_created__lte=query_datetime)).count() counts.append({ 'keen': { 'timestamp': timestamp_datetime.isoformat() }, 'provider': { 'name': preprint_provider.name, 'total': preprint_for_provider_count, }, }) return counts def get_class(): return PreprintSummary if __name__ == '__main__': init_app() preprint_summary = PreprintSummary() args = preprint_summary.parse_args() yesterday = args.yesterday if yesterday: date = (datetime.today() - timedelta(1)).date() else: date = parse(args.date).date() if args.date else None events = preprint_summary.get_events(date) preprint_summary.send_events(events)
lang: Python
proba: 0.999883
@@ -1013,16 +1013,33 @@ l=False, +%0A node__is @@ -1156,16 +1156,29 @@ atetime) +%0A ).count(
commit: 32aec3e5595fe0868b77260cb64be718d4e7f3b8
subject: Update Keras.py
old_file: Momentum/Keras.py
new_file: Momentum/Keras.py
from keras.datasets import mnist from keras.initializers import RandomUniform from keras.layers import Dense from keras.models import Sequential from keras.optimizers import SGD from keras.utils import to_categorical batch_size = 128 epochs = 30 learning_rate = 0.1 momentum = 0.9 num_classes = 10 (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.reshape(60000, 784).astype('float32') / 255 y_train = to_categorical(y_train, num_classes) x_test = x_test.reshape(10000, 784).astype('float32') / 255 y_test = to_categorical(y_test, num_classes) model = Sequential() model.add(Dense(512, activation='tanh', input_shape=(784,), kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01))) model.add(Dense(512, activation='tanh', input_shape=(784,), kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01))) model.add(Dense(num_classes, activation='softmax', input_shape=(784,), kernel_initializer=RandomUniform(minval=-0.01, maxval=0.01))) model.summary() model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=learning_rate, momentum=momentum), metrics=['accuracy']) history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_data=(x_test, y_test))
lang: Python
proba: 0
@@ -652,36 +652,36 @@ activation=' -tanh +relu ',%0D%0A @@ -838,12 +838,12 @@ on=' -tanh +relu ',%0D%0A @@ -1507,8 +1507,10 @@ y_test)) +%0D%0A
commit: aa6090b69f64721391dec38de04e8d01d23c48bf
subject: Add tests for differential calculus methods
old_file: sympy/calculus/tests/test_singularities.py
new_file: sympy/calculus/tests/test_singularities.py
from sympy import Symbol, exp, log from sympy.calculus.singularities import singularities from sympy.utilities.pytest import XFAIL def test_singularities(): x = Symbol('x', real=True) assert singularities(x**2, x) == () assert singularities(x/(x**2 + 3*x + 2), x) == (-2, -1) @XFAIL def test_singularities_non_rational(): x = Symbol('x', real=True) assert singularities(exp(1/x), x) == (0) assert singularities(log((x - 2)**2), x) == (2) @XFAIL def test_is_increasing(): pass @XFAIL def test_is_strictly_increasing(): pass @XFAIL def test_is_decreasing(): pass @XFAIL def test_is_strictly_decreasing(): pass @XFAIL def is_monotonic(): pass
lang: Python
proba: 0.000003
@@ -69,16 +69,17 @@ import +( singular @@ -83,16 +83,235 @@ larities +, is_increasing,%0A is_strictly_increasing, is_decreasing,%0A is_strictly_decreasing)%0Afrom sympy.sets import Interval%0Afrom sympy import oo, S %0A%0Afrom s @@ -346,16 +346,33 @@ XFAIL%0A%0A +x = Symbol('x')%0A%0A %0Adef tes @@ -697,31 +697,24 @@ x) == (2)%0A%0A%0A -@XFAIL%0A def test_is_ @@ -735,74 +735,600 @@ -p ass -%0A%0A%0A@XFAIL%0Adef test_is_strictly_increasing():%0A pass%0A%0A%0A@XFAIL +ert is_increasing(x**3 - 3*x**2 + 4*x, S.Reals)%0A assert is_increasing(-x**2, Interval(-oo, 0))%0A assert not is_increasing(-x**2, Interval(0, oo))%0A assert not is_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval(-2, 3))%0A%0A%0Adef test_is_strictly_increasing():%0A assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Ropen(-oo, -2))%0A assert is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.Lopen(3, oo))%0A assert not is_strictly_increasing(4*x**3 - 6*x**2 - 72*x + 30, Interval.open(-2, 3))%0A assert not is_strictly_increasing(-x**2, Interval(0, oo))%0A%0A %0Adef @@ -1350,37 +1350,277 @@ sing():%0A -p ass -%0A%0A%0A@XFAIL +ert is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3))%0A assert is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))%0A assert not is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2))%0A assert not is_decreasing(-x**2, Interval(-oo, 0))%0A%0A %0Adef test_is @@ -1651,47 +1651,256 @@ -p ass -%0A%0A%0A@XFAIL%0Adef is_monotonic(): +ert is_decreasing(1/(x**2 - 3*x), Interval.open(1.5, 3))%0A assert is_decreasing(1/(x**2 - 3*x), Interval.Lopen(3, oo))%0A assert not is_decreasing(1/(x**2 - 3*x), Interval.Ropen(-oo, S(3)/2)) %0A -p ass +ert not is_decreasing(-x**2, Interval(-oo, 0)) %0A
commit: f84f7e9091725d638e93d1dc14b830118a1833c8
subject: add returns for views
old_file: gps_tracker/views.py
new_file: gps_tracker/views.py
from pyramid.view import view_config points_list = [ {"_id": 'ObjectId("52e3eb56a7cade5d0898e012")', "latitude": "45.215", "longitude": "14.131", "gas_station": "Lukoil", "odometer": "24100", "description": "Bad coffee"}, {"_id": 'ObjectId("52e3eb79a7cade5d0898e013")', "latitude": "47.412", "longitude": "16.112", "gas_station": "Shell", "odometer": "24300", "description": "Nice service, but fuel is more expensive"}, {"_id": 'ObjectId("52e3eba5a7cade5d0898e014")', "latitude": "48.544", "longitude": "17.001", "gas_station": "Руснефть", "odometer": "24500", "description": "На заправке есть гостиница и кафе. Очень хорошо"}, {"_id": 'ObjectId("52e3ec19a7cade5d0898e015")', "latitude": "49.165", "longitude": "18.125", "gas_station": "Татнефть", "odometer": "24750", "description": "Есть стоянка кемпинг-стоянка. Дешёвая незамерзайка."}, {"_id": 'ObjectId("52f3aaf0a7cade0d846d00d7")', "gas_station": "Газпром", "odometer": "28400", "latitude": "49.249", "description": "Хорошее кафе, есть душ!", "longitude": "19.100"} ] @view_config(route_name='points', request_method='GET', renderer='json') def points_get_all(request): return points_list @view_config(route_name='points', request_method='POST', renderer='json') def point_add_new(request): return points_list[2] @view_config(route_name='point', request_method='GET', renderer='json') def point_get_one(request): return points_list[0] @view_config(route_name='point', request_method='PATCH', renderer='json') def point_edit_one(request): return {'response': 'point edited'} @view_config(route_name='point', request_method='DELETE', renderer='json') def point_delete_one(request): return {}
lang: Python
proba: 0
@@ -1593,36 +1593,22 @@ urn -%7B'response': 'point edited'%7D +points_list%5B1%5D %0A%0A%0A@
commit: f15f15ae8d8629e2387a552ed6616ec2ccdab574
subject: fix to poll.py, in python3 in raspberry, the cmd string to serial must be encode as UTF-8
old_file: greenery/bin/poll.py
new_file: greenery/bin/poll.py
#!/usr/bin/python3 """ Poll sensors to get temp/humid/soil-moisture etc... The Arduino accepts number-based command codes over the usb serial connection. Like, 002\n = 0=mode(get),0=measurement(temperature),2=address(digital pin 2) 0214\n = 0=mode(get),2=measurement(soil),14=address(analog pin 14, or A0) Commands MUST be terminated with '\n'! See command-map in global vars """ import os import sys import re import time import datetime import logging import serial sys.path.append( os.environ.get('GREENERY_WEB','/var/www/greenery') ) from greenery import db from greenery.apps.measurement.models import MeasurementType, Measurement from greenery.apps.admin.models import Setting from greenery.apps.sensor.models import Sensor logfile = '/var/tmp/greenery.errors.log' logging.basicConfig(filename=logfile) logger = logging.getLogger('actions') logger.setLevel(10) # global vars poll = None fahrenheit = None sdevice = '/dev/ttyUSB0' now = datetime.datetime.now().replace(second=0, microsecond=0) cmd_map = { # first char 'get': 0, 'set': 1, 'tx': 2, # second(*) char for get-mode 'temperature': 0, 'humidity': 1, 'soil': 2, } def main(): ser = None try: ser = serial.Serial(sdevice, 9600, 5) except Exception as x: logger.error(x) sys.stderr.write("Error! see log %s\n" % logfile) sys.exit(1) mtypes = MeasurementType.query.all() sensors = Sensor.query.all() for s in sensors: for typ in ('temperature', 'humidity', 'soil'): if re.search(typ, s.tags): cmd = "%d%d%d\n" % (cmd_map['get'], cmd_map[typ], s.address) ser.write(cmd.encode('UTF-8')) while True: # returns like; # line = "sm,14,22" (code, address, value) line = ser.readline() print(line) if re.search(r'^ok', line, re.IGNORECASE): # nothing more to read! break; if re.search(r'^fail', line, re.IGNORECASE): logger.warning("sensor '%s' fail result '%s'" % (s.name, line)) break; atoms = line.split(",") if len(atoms) != 3: logger.warning("sensor '%s' garbled output '%s" % (s.name, line)) continue; code,addr,val = atoms mt = match_flag_to_object(code, mtypes) if code == 't' and fahrenheit: val = val * 1.8 + 32 label = format_label(code, val, fahrenheit) # adjust for only one decimal place during write to db m = Measurement(mt.id, s.id, "%0.1f" % float(val), label, now) db.session.add(m) db.session.commit() ser.close() def match_flag_to_object(f, objects): label = None if f == 't': label = 'temperature' elif f == 'h': label = 'humidity' elif f == 'sm': label = 'soil moisture' else: return None for o in objects: if o.name == label: return o return None def format_label(typ, val, fahrenheit=False): if re.search(r'^t', typ): label = str(val) + u'\N{DEGREE SIGN}' if fahrenheit: label += "F" else: label += "C" return label if re.search(r'^(h|sm)', typ): label = str(val) + "%" return label return None if __name__ == '__main__': poll = Setting.query.filter(Setting.name == 'polling interval minutes').first() if not poll: logger.error("could not determine polling interval from db") sys.stderr.write("error\n") sys.exit(1) if now.minute % poll.value > 0: # not the right time to be running this. exit sys.exit(0) fahrenheit = bool(Setting.query.filter(Setting.name == 'store temperature fahrenheit').first().value) main()
lang: Python
proba: 0.000175
@@ -1930,40 +1930,26 @@ ne() -%0A print(line) +.decode().strip()%0A %0A
commit: d5f84783c376906dd5733391593ceae792b5edda
subject: Bump version to 0.1.0
old_file: vcli/__init__.py
new_file: vcli/__init__.py
__version__ = '0.0.1'
lang: Python
proba: 0.000001
@@ -14,9 +14,9 @@ '0. -0.1 +1.0 '%0A
commit: 7c09e8cad8892aa2a491297618b6091e7286c6d3
subject: move get model code to method
old_file: exportdata/management/commands/exportdata.py
new_file: exportdata/management/commands/exportdata.py
import os import csv import sys from optparse import make_option from collections import Callable from django.contrib.sites.models import Site from django.db.models.loading import get_model from django.core.management.base import LabelCommand, CommandError DOMAIN = Site.objects.get_current().domain class Command(LabelCommand): option_list = LabelCommand.option_list + ( make_option('--fields', dest='fields'), make_option('--filters', dest='filters', default=None), make_option('--ordering', dest='ordering', default=None), # TODO: advanced filtration, ranges ) help = 'Export any data in csv' label = 'app.model' def handle_label(self, label, **options): app, model = label.split('.', 1) Model = get_model(app, model) if not Model: raise CommandError('Model "{0}" not found!'.format(label)) filename = os.path.join(os.path.expanduser('~'), '{0}.csv'.format(label)) resultcsv = csv.writer(open(filename, 'wb'), delimiter=';', quoting=csv.QUOTE_MINIMAL) fields = options.get('fields') filters = options.get('filters', None) ordering = options.get('ordering', None) qs = Model.objects.all() if filters: filters = filters.split(',') for filter_name in filters: if not hasattr(qs, filter_name): raise CommandError( 'Model "{0}" not not to have "{1}" filter'.format( label, filter_name ) ) qs = getattr(qs, filter_name)() if ordering: ordering = ordering.split(',') qs = qs.order_by(*ordering) fields = fields.split(',') resultcsv.writerow(fields) for obj in qs: result = [] for field_name in fields: if '__' in field_name: field_name = field_name.split('__', 1) field = getattr(obj, field_name[0], None) field = getattr(field, field_name[1], None) else: field = getattr(obj, field_name, None) if field_name == 'get_absolute_url': # hack, because in python not possible # check function has a decorator field = field() field = u'http://{0}{1}'.format(DOMAIN, field) if isinstance(field, Callable): field = field() if isinstance(field, (str, unicode,)): field = field.encode('utf-8') result.append(field) resultcsv.writerow(result) sys.exit('Done! Exported objects: {0}'.format(qs.count()))
lang: Python
proba: 0
@@ -676,18 +676,15 @@ def -handle_lab +get_mod el(s @@ -697,19 +697,8 @@ abel -, **options ):%0A @@ -867,16 +867,122 @@ (label)) +%0A return Model%0A%0A def handle_label(self, label, **options):%0A Model = self.get_model(label) %0A%0A
commit: 654022be05ca24596b643a891d11ea26a2ab393a
subject: drop todos
old_file: corehq/apps/performance_sms/forms.py
new_file: corehq/apps/performance_sms/forms.py
import copy from corehq.apps.groups.fields import GroupField from django import forms from django.utils.translation import ugettext as _, ugettext_lazy from corehq.apps.app_manager.fields import ApplicationDataSourceUIHelper from corehq.apps.performance_sms import parser from corehq.apps.performance_sms.exceptions import InvalidParameterException from corehq.apps.performance_sms.models import TemplateVariable, ScheduleConfiguration, SCHEDULE_CHOICES from corehq.apps.reports.daterange import get_simple_dateranges from corehq.apps.userreports.ui.fields import JsonField from crispy_forms.bootstrap import StrictButton from crispy_forms.helper import FormHelper from crispy_forms.layout import Layout import corehq.apps.style.crispy as hqcrispy class PerformanceMessageEditForm(forms.Form): recipient_id = forms.CharField() schedule = forms.ChoiceField(choices=[(choice, ugettext_lazy(choice)) for choice in SCHEDULE_CHOICES]) template = forms.CharField(widget=forms.Textarea) time_range = forms.ChoiceField( choices=[(choice.slug, choice.description) for choice in get_simple_dateranges()] ) def __init__(self, domain, config, *args, **kwargs): self.domain = domain self.config = config def _to_initial(config): initial = copy.copy(config._doc) initial['schedule'] = config.schedule.interval if config.template_variables: # todo: needs to support multiple sources initial['application'] = config.template_variables[0].app_id initial['source'] = config.template_variables[0].source_id initial['time_range'] = config.template_variables[0].time_range return initial super(PerformanceMessageEditForm, self).__init__(initial=_to_initial(config), *args, **kwargs) self.fields['recipient_id'] = GroupField(domain=domain, label=_('Recipient Group')) self.app_source_helper = ApplicationDataSourceUIHelper(enable_cases=False) self.app_source_helper.bootstrap(self.domain) data_source_fields = self.app_source_helper.get_fields() self.fields.update(data_source_fields) self.helper = _get_default_form_helper() form_layout = self.fields.keys() form_layout.append( hqcrispy.FormActions( StrictButton( _("Save Changes"), type="submit", css_class="btn btn-primary", ), ) ) self.helper.layout = Layout( *form_layout ) def clean_template(self): return _clean_template(self.cleaned_data['template']) def clean_schedule(self): # todo: support other scheduling options return ScheduleConfiguration(interval=self.cleaned_data['schedule']) def save(self, commit=True): self.config.recipient_id = self.cleaned_data['recipient_id'] self.config.schedule = self.cleaned_data['schedule'] self.config.template = self.cleaned_data['template'] # todo: support more than one data source template_variable = TemplateVariable( type=self.cleaned_data['source_type'], time_range=self.cleaned_data['time_range'], source_id=self.cleaned_data['source'], app_id=self.cleaned_data['application'], ) self.config.template_variables = [template_variable] if commit: self.config.save() return self.config @property def app_id(self): # todo: need to support multiple sources if self.config.template_variables: return self.config.template_variables[0].app_id return '' @property def source_id(self): if self.config.template_variables: return self.config.template_variables[0].source_id return '' def _get_default_form_helper(): helper = FormHelper() helper.label_class = 'col-sm-3 col-md-2' helper.field_class = 'col-sm-9 col-md-8 col-lg-6' helper.form_class = "form-horizontal" helper.form_id = "performance-form" helper.form_method = 'post' return helper def _clean_template(template): try: 
parser.validate(template) except InvalidParameterException as e: raise forms.ValidationError(unicode(e)) return template class AdvancedPerformanceMessageEditForm(forms.Form): recipient_id = forms.CharField() schedule = forms.ChoiceField(choices=[(choice, ugettext_lazy(choice)) for choice in SCHEDULE_CHOICES]) template = forms.CharField(widget=forms.Textarea) template_variables = JsonField(expected_type=list) def __init__(self, domain, config, *args, **kwargs): self.domain = domain self.config = config super(AdvancedPerformanceMessageEditForm, self).__init__(initial=config.to_json(), *args, **kwargs) self.fields['recipient_id'] = GroupField(domain=domain, label=_('Recipient Group')) self.helper = _get_default_form_helper() form_layout = self.fields.keys() form_layout.append( hqcrispy.FormActions( StrictButton( _("Save Changes"), type="submit", css_class="btn btn-primary", ), ) ) self.helper.layout = Layout( *form_layout ) def clean_template(self): return _clean_template(self.cleaned_data['template']) def clean_schedule(self): # todo: support other scheduling options return ScheduleConfiguration(interval=self.cleaned_data['schedule']) def save(self, commit=True): self.config.recipient_id = self.cleaned_data['recipient_id'] self.config.schedule = self.cleaned_data['schedule'] self.config.template = self.cleaned_data['template'] self.config.template_variables = self.cleaned_data['template_variables'] if commit: self.config.save() return self.config def clean_template_variables(self): template_vars = self.cleaned_data['template_variables'] if not isinstance(template_vars, list): raise forms.ValidationError(_('Template variables must be a list!')) wrapped_vars = [] for var in template_vars: try: wrapped = TemplateVariable.wrap(var) wrapped.validate() wrapped_vars.append(wrapped) except Exception as e: raise forms.ValidationError(_(u'Problem wrapping template variable! {}').format(e)) return wrapped_vars
lang: Python
proba: 0
@@ -1419,66 +1419,8 @@ es:%0A - # todo: needs to support multiple sources%0A @@ -3021,58 +3021,8 @@ e'%5D%0A - # todo: support more than one data source%0A @@ -3463,57 +3463,8 @@ f):%0A - # todo: need to support multiple sources%0A
commit: 1587598a16b5822dcb87e95cfc131c04d4625d53
subject: Add delete_all_repeaters util
old_file: corehq/apps/repeaters/dbaccessors.py
new_file: corehq/apps/repeaters/dbaccessors.py
from dimagi.utils.parsing import json_format_datetime from corehq.util.couch_helpers import paginate_view from corehq.util.test_utils import unit_testing_only from .const import RECORD_PENDING_STATE, RECORD_FAILURE_STATE, RECORD_SUCCESS_STATE def get_pending_repeat_record_count(domain, repeater_id): return get_repeat_record_count(domain, repeater_id, RECORD_PENDING_STATE) def get_failure_repeat_record_count(domain, repeater_id): return get_repeat_record_count(domain, repeater_id, RECORD_FAILURE_STATE) def get_success_repeat_record_count(domain, repeater_id): return get_repeat_record_count(domain, repeater_id, RECORD_SUCCESS_STATE) def get_repeat_record_count(domain, repeater_id=None, state=None): from .models import RepeatRecord startkey = [domain] endkey = [domain, {}] if repeater_id and not state: startkey = [domain, repeater_id] endkey = [domain, repeater_id, {}] elif repeater_id and state: startkey = [domain, repeater_id, state] endkey = [domain, repeater_id, state] elif not repeater_id and state: ids = sorted(_get_repeater_ids_by_domain(domain)) if not ids: return 0 startkey = [domain, ids[0], state] endkey = [domain, ids[-1], state] result = RepeatRecord.get_db().view('receiverwrapper/repeat_records', startkey=startkey, endkey=endkey, include_docs=False, reduce=True, ).one() return result['value'] if result else 0 def get_paged_repeat_records(domain, skip, limit, repeater_id=None, state=None): from .models import RepeatRecord kwargs = { 'include_docs': True, 'reduce': False, 'limit': limit, 'skip': skip, } if repeater_id and not state: kwargs['startkey'] = [domain, repeater_id] kwargs['endkey'] = [domain, repeater_id, {}] elif repeater_id and state: kwargs['startkey'] = [domain, repeater_id, state] kwargs['endkey'] = [domain, repeater_id, state] elif not repeater_id and state: kwargs['key'] = [domain, None, state] elif not repeater_id and not state: kwargs['startkey'] = [domain] kwargs['endkey'] = [domain, {}] results = RepeatRecord.get_db().view('receiverwrapper/repeat_records', **kwargs ).all() return [RepeatRecord.wrap(result['doc']) for result in results] def get_repeaters_by_domain(domain): from .models import Repeater results = Repeater.get_db().view('receiverwrapper/repeaters', startkey=[domain], endkey=[domain, {}], include_docs=True, reduce=False, ).all() return [Repeater.wrap(result['doc']) for result in results] def _get_repeater_ids_by_domain(domain): from .models import Repeater results = Repeater.get_db().view('receiverwrapper/repeaters', startkey=[domain], endkey=[domain, {}], include_docs=False, reduce=False, ).all() return [result['id'] for result in results] def iterate_repeat_records(due_before, chunk_size=10000, database=None): from .models import RepeatRecord json_now = json_format_datetime(due_before) view_kwargs = { 'reduce': False, 'startkey': [None], 'endkey': [None, json_now, {}], 'include_docs': True } for doc in paginate_view( RepeatRecord.get_db(), 'receiverwrapper/repeat_records_by_next_check', chunk_size, **view_kwargs): yield RepeatRecord.wrap(doc['doc']) @unit_testing_only def delete_all_repeat_records(): from .models import RepeatRecord results = RepeatRecord.get_db().view('receiverwrapper/repeat_records_by_next_check', reduce=False).all() for result in results: try: repeat_record = RepeatRecord.get(result['id']) except Exception: pass else: repeat_record.delete()
lang: Python
proba: 0.000001
@@ -3953,12 +3953,254 @@ rd.delete()%0A +%0A%0A@unit_testing_only%0Adef delete_all_repeaters():%0A from .models import Repeater%0A for repeater in Repeater.get_db().view('receiverwrapper/repeaters', reduce=False, include_docs=True).all():%0A Repeater.wrap(repeater%5B'doc'%5D).delete()%0A
commit: 680122f69c5aab9be4dc1965024ac882326d1c5b
subject: Add license to watched.py
old_file: derpibooru/watched.py
new_file: derpibooru/watched.py
class Watched(object): def __init__(self, key, page=1, perpage=15, comments=False, fav=False): self.__parameters = {} self.key = key self.page = page self.perpage = perpage self.comments = comments self.fav = fav @property def hostname(self): return("https://derpiboo.ru") @property def key(self): return(self.parameters["key"]) @key.setter def key(self, key=""): if not isinstance(key, str): raise TypeError("key must be a string") self.__parameters["key"] = key @property def page(self): return(self.parameters["page"]) @page.setter def page(self, page=1): if not isinstance(page, int): raise TypeError("page number must be an int") if page < 1: raise ValueError("page number must be greater than 0") self.__parameters["page"] = page def next_page(self, number=1): if not isinstance(number, int): raise TypeError("page number must be an int") if number < 1: raise ValueError("page number must be greater than 0") self.__parameters["page"] += number def previous_page(self, number=1): if not isinstance(number, int): raise TypeError("page number must be an int") if number < 1: raise ValueError("page number must be greater than 0") if self.parameters["page"] - number <= 1: self.__parameters["page"] = 1 else: self.__parameters["page"] -= number @property def perpage(self): return(self.parameters["perpage"]) @perpage.setter def perpage(self, page_size): if not isinstance(page_size, int): raise TypeError("perpage must be an int") if page_size not in range(1, 51): raise ValueError("perpage must be within range of 1-50") self.__parameters["perpage"] = page_size @property def comments(self): return(self.parameters["comments"]) @comments.setter def comments(self, comments=True): if not isinstance(comments, bool): raise TypeError("comments must be either True or False") self.__parameters["comments"] = comments @property def fav(self): return(self.parameters["fav"]) @fav.setter def fav(self, fav=True): if not isinstance(fav, bool): raise TypeError("favorites must be either True or False") self.__parameters["fav"] = fav @property def parameters(self): return(self.__parameters) @property def url(self): url, parameters = self.hostname + "images/watched.json", [] parameters.append("key={0}".format(self.key)) parameters.append("perpage={0}".format(self.perpage)) parameters.append("page={0}".format(self.page)) if self.comments == True: parameters.append("comments=") if self.fav == True: parameters.append("fav=") url += "?{0}".format("&".join(parameters)) return(url)
lang: Python
proba: 0
@@ -1,8 +1,1345 @@ +# Copyright (c) 2014, Joshua Stone%0A# All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions are met:%0A#%0A# * Redistributions of source code must retain the above copyright notice, this%0A# list of conditions and the following disclaimer.%0A#%0A# * Redistributions in binary form must reproduce the above copyright notice,%0A# this list of conditions and the following disclaimer in the documentation%0A# and/or other materials provided with the distribution.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS %22AS IS%22%0A# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE%0A# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE%0A# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE%0A# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL%0A# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR%0A# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER%0A# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,%0A# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE%0A# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A %0Aclass W @@ -3772,16 +3772,17 @@ name + %22 +/ images/w @@ -4153,8 +4153,132 @@ rn(url)%0A +%0A @property%0A def random(self):%0A url = self.hostname + %22/images/watched.json?random=y&key=%22 + self.key%0A%0A return(url)%0A
commit: 42e26737d083b82716c3adb8c19fb66a5063dc65
subject: change version number to v3.0.1
old_file: src/cmdlr/info.py
new_file: src/cmdlr/info.py
"""Cmdlr infomation files.""" VERSION = '3.0.0' DESCRIPTION = ('An extensible comic subscriber.') LICENSE = 'MIT' AUTHOR = 'Civalin' AUTHOR_EMAIL = '[email protected]' PROJECT_URL = 'https://github.com/civalin/cmdlr' PROJECT_NAME = 'cmdlr'
lang: Python
proba: 0.000123
@@ -39,17 +39,17 @@ = '3.0. -0 +1 '%0ADESCRI
commit: ee7c06579551f89296d60a1929357d637d5c9389
subject: Update yeti.py
old_file: misp_modules/modules/expansion/yeti.py
new_file: misp_modules/modules/expansion/yeti.py
import json import logging try: import pyeti except ImportError: print("pyeti module not installed.") from pymisp import MISPEvent, MISPObject, MISPAttribute misperrors = {'error': 'Error'} mispattributes = {'input': ['ip-src', 'ip-dst', 'hostname', 'domain'], 'format': 'misp_standard' } # possible module-types: 'expansion', 'hover' or both moduleinfo = {'version': '1', 'author': 'Sebastien Larinier @sebdraven', 'description': 'Query on yeti', 'module-type': ['expansion', 'hover']} moduleconfig = ['apikey', 'url'] class Yeti(): def __init__(self, url, key,attribute): self.misp_mapping = {'Ip': 'ip-dst', 'Domain': 'domain', 'Hostname': 'hostname', 'Url': 'url', 'AutonomousSystem': 'AS'} self.yeti_client = pyeti.YetiApi(url=url, api_key=key) self.attribute = attribute self.misp_event = MISPEvent() self.misp_event.add_attribute(**attribute) def search(self, value): obs = self.yeti_client.observable_search(value=value) if obs: return obs[0] def get_neighboors(self, obs_id): neighboors = self.yeti_client.neighbors_observables(obs_id) if neighboors and 'objs' in neighboors: for n in neighboors['objs']: yield n def get_tags(self, value): obs = self.search(value) if obs: for t in obs['tags']: yield t def get_entity(self, obs_id): companies = self.yeti_client.observable_to_company(obs_id) actors = self.yeti_client.observable_to_actor(obs_id) campaigns = self.yeti_client.observable_to_campaign(obs_id) exploit_kit = self.yeti_client.observable_to_exploitkit(obs_id) exploit = self.yeti_client.observable_to_exploit(obs_id) ind = self.yeti_client.observable_to_indicator(obs_id) res = [] res.extend(companies) res.extend(actors) res.extend(campaigns) res.extend(exploit) res.extend(exploit_kit) res.extend(ind) for r in res: yield r['name'] def parse_yeti_result(self): obs = self.search(self.attribute['value']) values = [] types = [] for obs_to_add in self.get_neighboors(obs['id']): object_misp_domain_ip = self.__get_object_domain_ip(obs_to_add) if object_misp_domain_ip: self.misp_event.add_object(object_misp_domain_ip) object_misp_url = self.__get_object_url(obs_to_add) if object_misp_url: self.misp_event.add_object(object_misp_url) if not object_misp_url and not object_misp_url: attr = self.__get_attribute(obs_to_add) if attr: self.misp_event.add_attribute(attr.type, attr.value, tags=attr.tags) def get_result(self): event = json.loads(self.misp_event.to_json()) results = {key: event[key] for key in ('Attribute', 'Object')} return results def __get_attribute(self, obs_to_add): attr = MISPAttribute() attr.value = obs_to_add['value'] try: attr.type = self.misp_mapping[obs_to_add['type']] except KeyError: logging.error('type not found %s' % obs_to_add['type']) return attr.tags.extend([t['name'] for t in obs_to_add['tags']]) return attr def __get_object_domain_ip(self, obj_to_add): if (obj_to_add['type'] == 'Ip' and self.attribute['type'] in ['hostname','domain']) or\ (obj_to_add['type'] in ('Hostname', 'Domain') and self.attribute['type'] in ('ip-src', 'ip-dst')): domain_ip_object = MISPObject('domain-ip') domain_ip_object.add_attribute(self.__get_relation(obj_to_add), obj_to_add['value']) domain_ip_object.add_attribute(self.__get_relation(self.attribute, is_yeti_object=False), self.attribute['value']) domain_ip_object.add_reference(self.attribute['uuid'], 'related_to') return domain_ip_object def __get_object_url(self, obj_to_add): if (obj_to_add['type'] == 'Url' and self.attribute['type'] in ['hostname', 'domain', 'ip-src', 'ip-dst']) or ( obj_to_add['type'] in ('Hostname', 'Domain', 'Ip') and 
self.attribute['type'] == 'url' ): url_object = MISPObject('url') obj_relation = self.__get_relation(obj_to_add) if obj_relation: url_object.add_attribute(obj_relation, obj_to_add['value']) obj_relation = self.__get_relation(self.attribute, is_yeti_object=False) if obj_relation: url_object.add_attribute(obj_relation, self.attribute['value']) url_object.add_reference(self.attribute['uuid'], 'related_to') return url_object def __get_relation(self, obj, is_yeti_object=True): if is_yeti_object: type_attribute = self.misp_mapping[obj['type']] else: type_attribute = obj['type'] if type_attribute == 'ip-src' or type_attribute == 'ip-dst': return 'ip' elif 'domain' == type_attribute: return 'domain' elif 'hostname' == type_attribute: return 'domain' elif type_attribute == 'url': return type_attribute def handler(q=False): if q is False: return False apikey = None yeti_url = None yeti_client = None request = json.loads(q) attribute = request['attribute'] if attribute['type'] not in mispattributes['input']: return {'error': 'Unsupported attributes type'} if 'config' in request and 'url' in request['config']: yeti_url = request['config']['url'] if 'config' in request and 'apikey' in request['config']: apikey = request['config']['apikey'] if apikey and yeti_url: yeti_client = Yeti(yeti_url, apikey, attribute) if yeti_client: yeti_client.parse_yeti_result() return {'results': yeti_client.get_result()} else: misperrors['error'] = 'Yeti Config Error' return misperrors def version(): moduleinfo['config'] = moduleconfig return moduleinfo def introspection(): return mispattributes
lang: Python
proba: 0
@@ -3387,24 +3387,25 @@ return%0A +%0A attr.tag @@ -3400,63 +3400,74 @@ -attr.tags.extend(%5Bt%5B'name'%5D for t in obs_to_add%5B'tags'%5D +for t in obs_to_add%5B'tags'%5D:%0A attr.tags.append(t%5B'name' %5D)%0A
commit: 49e301ac6a74a30cfdf00bf4178889f9ecb74889
subject: Patch release for bug-fix #166
old_file: vtki/_version.py
new_file: vtki/_version.py
""" version info for vtki """ # major, minor, patch version_info = 0, 18, 1 # Nice string for the version __version__ = '.'.join(map(str, version_info))
lang: Python
proba: 0
@@ -67,17 +67,17 @@ 0, 18, -1 +2 %0A%0A# Nice
commit: 4c6ec1413d1a12165c1231095783aa94d235389a
subject: Add __version__ to vumi package.
old_file: vumi/__init__.py
new_file: vumi/__init__.py
lang: Python
proba: 0.000001
@@ -0,0 +1,69 @@ +%22%22%22%0AVumi scalable text messaging engine.%0A%22%22%22%0A%0A__version__ = %220.5.0a%22%0A
commit: c775006de5b4f984356052e2414e8a64d75d0e6b
subject: Update Ridge_Kernel.py
old_file: KernelModel/Ridge_Kernel.py
new_file: KernelModel/Ridge_Kernel.py
import numpy as np import scipy as sp from MachineLearning.KernelModel import KernelCalc class KerRidgeReg: ########################################################################################################################## # Kernel Ridge Regression/Gaussian process # # # # Louis-Francois Arsenault, Columbia Universisty (2013-2017), [email protected] # ########################################################################################################################## # # # INPUTS: # # # ########################################################################################################################## def __init__(self): pass def train(self,X,y,lam,var,typeK,typeD,xinterval=None): self.Nlearn = X.shape[0] self.Nfeat = X.shape[1] self.X = X.copy() self.lam = lam self.var = var.copy() self.typeK = typeK self.typeD = typeD self.xinterval=xinterval self.Ker = KernelCalc(X,X,self.Nlearn,self.Nlearn,self.var,typeK=self.typeK,typeD=self.typeD,T=True,xinterval=self.xinterval) Klam = self.Ker + lam*np.identity(self.Nlearn) try: self.L = np.linalg.cholesky(Klam) self.alpha=sp.linalg.cho_solve((self.L,True),y) except np.linalg.linalg.LinAlgError: print 'K+lambda*I not positive definite, solving anyway, but beware!!' #postvar in query will not work, need to be corrected self.alpha = np.linalg.solve(Klam,y) self.L=None def train_withKer(self,X,y,lam,var,Ker,typeK,typeD): #If the Kernel matrix is already provided self.Nlearn = X.shape[0] self.Nfeat = X.shape[1] self.X = X.copy() self.lam = lam self.var = var.copy() self.typek = typeK self.typeD = typeD self.Ker = Ker Klam = self.Ker + lam*np.identity(self.Nlearn) #Is it faster to do it at once ( np.linalg.solve(Klam,y) ) ? try: self.L = np.linalg.cholesky(Klam) self.alpha=sp.linalg.cho_solve((self.L,True),y) except np.linalg.linalg.LinAlgError: print 'K+lambda*I not positive definite, solving anyway, but beware!!'#postvar in query will not work, need to be corrected self.alpha = np.linalg.solve(Klam,y) self.L=None def query(self,Xt,postVar=False): KerTest = KernelCalc(self.X,Xt,self.Nlearn,Xt.shape[0],self.var,typeK=self.typeK,typeD=self.typeD,T=False,xinterval=self.xinterval) if postVar is False: return KerTest.dot(self.alpha) elif postVar is True: #return the Gaussian process posterior variance change k_test^T(K+lambda*I)^-1k_test if self.L is None: print 'K+lambda*I not positive definite and thus there exist no Cholesky decomposition, the posterior variance is not returned' return KerTest.dot(self.alpha), None else: v=np.linalg.solve(self.L,KerTest.transpose()) return KerTest.dot(self.alpha), v.transpose().dot(v) def query_withKer(self,Xt,KerTest,postVar=False): if postVar is False: return KerTest.dot(self.alpha) elif postVar is True: #return the Gaussian process posterior variance change k_test^T(K+lambda*I)^-1k_test if self.L is None: print 'K+lambda*I not positive definite and thus there exist no Cholesky decomposition, the posterior variance is not returned' return KerTest.dot(self.alpha), None else: v=np.linalg.solve(self.L,KerTest.transpose()) return KerTest.dot(self.alpha), v.transpose().dot(v) def score(self,ypredic,ytrue,metric='MAE'): #Calculate the mean absolute error MAE by default, mean square error MSE #ypredic = self.query(Xt) if metric == 'MAE': #need to implement for multiple outputs return np.mean(np.absolute(ypredic-ytrue)) elif metric == 'MSE': ydiff = (ypredic-ytrue) return np.mean(ydiff**2) elif metric == 'MDAE': #need to implement for multiple outputs return np.median(np.absolute(ypredic-ytrue)) #Adding the 
possibility of hyperparameters determination by maximizing log-likelihood. def __LogLike_neg(self,mult=False,Nout=1.): #negative log-likelyhood if mult==True: return 0.5*self.Nlearn*np.linalg.det(self.y.transpose().dot(self.alpha)) + 2.*Nout*np.sum(np.log(np.diag(self.L))) else: return 0.5*self.y.transpose().dot(self.alpha) + np.sum(np.log(np.diag(self.L)))
lang: Python
proba: 0
@@ -2375,86 +2375,8 @@ n)%0A%0A - #Is it faster to do it at once ( np.linalg.solve(Klam,y) ) ?%0A %09%09tr
commit: 8adb539d04660bc058dca6cc6eb8bc19993f6d30
subject: update BeerEX.bot.py
old_file: BeerEX.bot.py
new_file: BeerEX.bot.py
#!/usr/bin/env python # coding=utf-8 from telegram import ReplyKeyboardRemove, KeyboardButton, ReplyKeyboardMarkup, ParseMode from telegram.ext import MessageHandler, Filters, Updater, CommandHandler from emoji import emojize import logging import clips import os # Enable logging logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) def start(bot, update): """ Sends a welcome message when the command /start is issued. """ clips.Reset() clips.Run() # Get the initial UI state initial_fact = clips.Eval('(find-fact ((?f UI-state)) (eq ?f:state initial))') update.message.reply_text(text='Hello {}! 🤖 '.format(update.message.from_user.first_name), reply_markup=ReplyKeyboardRemove()) update.message.reply_text(initial_fact[0].Slots['display']) def new(bot, update): """ Starts a new chat with the beer expert when the command /new is issued. """ clips.Reset() clips.Run() nextUIState(bot, update) def nextUIState(bot, update): """ Re-creates the dialog window to match the current state in working memory. """ # Get the state-list fact_list = clips.Eval('(find-all-facts ((?f state-list)) TRUE)') if len(fact_list) == 0: return current_id = fact_list[0].Slots['current'] # Get the current UI state fact_list = clips.Eval('(find-all-facts ((?f UI-state)) (eq ?f:id %s))' % current_id) if len(fact_list) == 0: return state = fact_list[0].Slots['state'] if state == 'initial': clips.Assert('(next %s)' % current_id) clips.Run() nextUIState(bot, update) elif state == 'final': results = fact_list[0].Slots['display'] keyboard = [[KeyboardButton(text=emojize(':back: Previous', use_aliases=True))], [KeyboardButton(text=emojize(':repeat: Restart', use_aliases=True))], [KeyboardButton(text=emojize(':x: Cancel', use_aliases=True))]] reply_markup = ReplyKeyboardMarkup(keyboard) update.message.reply_text(text=results, parse_mode=ParseMode.MARKDOWN, disable_web_page_preview=True, reply_markup=reply_markup) else: question = fact_list[0].Slots['display'] valid_answers = fact_list[0].Slots['valid-answers'] keyboard = [] for answer in valid_answers: keyboard.append([KeyboardButton(text=answer)]) keyboard.append([KeyboardButton(text=emojize(':back: Previous', use_aliases=True))]) keyboard.append([KeyboardButton(text=emojize(':x: Cancel', use_aliases=True))]) reply_markup = ReplyKeyboardMarkup(keyboard) update.message.reply_text(text=question, reply_markup=reply_markup) dispatcher.add_handler(MessageHandler(Filters.text, handleEvent)) def handleEvent(bot, update): """ Triggers the next state in working memory based on which button was pressed. 
""" # Get the state-list fact_list = clips.Eval('(find-all-facts ((?f state-list)) TRUE)') if len(fact_list) == 0: return current_id = fact_list[0].Slots['current'] # Get the current UI state fact_list = clips.Eval('(find-all-facts ((?f UI-state)) (eq ?f:id %s))' % current_id) if len(fact_list) == 0: return valid_answers = fact_list[0].Slots['valid-answers'] if update.message.text in valid_answers: clips.Assert('(next %s %s)' % (current_id, update.message.text)) clips.Run() nextUIState(bot, update) elif update.message.text == emojize(':back: Previous', use_aliases=True): clips.Assert('(prev %s)' % current_id) clips.Run() nextUIState(bot, update) elif update.message.text == emojize(':repeat: Restart', use_aliases=True): new(bot, update) elif update.message.text == emojize(':x: Cancel', use_aliases=True): cancel(bot, update) def cancel(bot, update): """ Ends the chat with the beer expert when the command /cancel is issued. """ update.message.reply_text(text='Bye! I hope we can talk again some day. 👋🏻', reply_markup=ReplyKeyboardRemove()) clips.Reset() def unknown(bot, update): """ Sends an error message when an unrecognized command is typed. """ bot.send_message(chat_id=update.message.chat_id, text='Unrecognized command. Say what?') def error(bot, update, error): """ Log errors caused by updates. """ logger.warning('Update %s caused error %s' % (update, error)) if __name__ == '__main__': # Load the Beer EXpert system clips.Load('./clips/beerex.clp') token = open('token', 'r').read() # Create the updater and pass it the bot's token. updater = Updater(token) # Get the dispatcher to register handlers dispatcher = updater.dispatcher # Add command and message handlers dispatcher.add_handler(CommandHandler('start', start)) dispatcher.add_handler(CommandHandler('new', new)) dispatcher.add_handler(CommandHandler('cancel', cancel)) dispatcher.add_handler(MessageHandler(Filters.command, unknown)) # Log all errors dispatcher.add_error_handler(error) updater.start_webhook(listen="0.0.0.0", port=int(os.environ.get('PORT', '5000')), url_path=token) updater.bot.set_webhook("https://beerex-telegram-bot.herokuapp.com/" + token) # Start the bot updater.start_polling() # Run the bot until you press Ctrl-C or the process receives SIGINT, # SIGTERM or SIGABRT. This should be used most of the time, since # start_polling() is non-blocking and will stop the bot gracefully. updater.idle()
lang: Python
proba: 0
@@ -5059,28 +5059,8 @@ Add -command and message hand
commit: 3bb474a4506abb569d5c54703ba3bf2c9c933fd9
subject: Add tof-server to path
old_file: tof-server.wsgi
new_file: tof-server.wsgi
activate_this = '/var/www/tof-server/flask/bin/activate_this.py' execfile(activate_this, dict(__file__=activate_this)) #activator = 'some/path/to/activate_this.py' #with open(activator) as f: # exec(f.read(), {'__file__': activator}) from tof_server import app as application
lang: Python
proba: 0.000001
@@ -1,20 +1,32 @@ +import sys%0A%0A activate_this = '/va @@ -125,16 +125,56 @@ this))%0A%0A +sys.path.append('/var/www/tof-server')%0A%0A #activat
commit: 92eec11b7a2531632d7e67e6c65f3db686af2322
subject: update BeerEX.bot.py
old_file: BeerEX.bot.py
new_file: BeerEX.bot.py
#!/usr/bin/env python # coding=utf-8 from telegram import ReplyKeyboardRemove, KeyboardButton, ReplyKeyboardMarkup, ParseMode from telegram.ext import MessageHandler, Filters, Updater, CommandHandler from emoji import emojize import logging import clips import os # Enable logging logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO) logger = logging.getLogger(__name__) def start(bot, update): """ Sends a welcome message when the command /start is issued. """ clips.Reset() clips.Run() # Get the initial UI state initial_fact = clips.Eval('(find-fact ((?f UI-state)) (eq ?f:state initial))') update.message.reply_text(text='Hello {}! 🤖 '.format(update.message.from_user.first_name), reply_markup=ReplyKeyboardRemove()) update.message.reply_text(initial_fact[0].Slots['display']) def new(bot, update): """ Starts a new chat with the beer expert when the command /new is issued. """ clips.Reset() clips.Run() nextUIState(bot, update) def nextUIState(bot, update): """ Re-creates the dialog window to match the current state in working memory. """ # Get the state-list fact_list = clips.Eval('(find-all-facts ((?f state-list)) TRUE)') if len(fact_list) == 0: return current_id = fact_list[0].Slots['current'] # Get the current UI state fact_list = clips.Eval('(find-all-facts ((?f UI-state)) (eq ?f:id %s))' % current_id) if len(fact_list) == 0: return state = fact_list[0].Slots['state'] if state == 'initial': clips.Assert('(next %s)' % current_id) clips.Run() nextUIState(bot, update) elif state == 'final': results = fact_list[0].Slots['display'] keyboard = [[KeyboardButton(text=emojize(':back: Previous', use_aliases=True))], [KeyboardButton(text=emojize(':repeat: Restart', use_aliases=True))], [KeyboardButton(text=emojize(':x: Cancel', use_aliases=True))]] reply_markup = ReplyKeyboardMarkup(keyboard) update.message.reply_text(text=results, parse_mode=ParseMode.MARKDOWN, disable_web_page_preview=True, reply_markup=reply_markup) else: question = fact_list[0].Slots['display'] valid_answers = fact_list[0].Slots['valid-answers'] keyboard = [] for answer in valid_answers: keyboard.append([KeyboardButton(text=answer)]) keyboard.append([KeyboardButton(text=emojize(':back: Previous', use_aliases=True))]) keyboard.append([KeyboardButton(text=emojize(':x: Cancel', use_aliases=True))]) reply_markup = ReplyKeyboardMarkup(keyboard) update.message.reply_text(text=question, reply_markup=reply_markup) dispatcher.add_handler(MessageHandler(Filters.text, handleEvent)) def handleEvent(bot, update): """ Triggers the next state in working memory based on which button was pressed. 
""" # Get the state-list fact_list = clips.Eval('(find-all-facts ((?f state-list)) TRUE)') if len(fact_list) == 0: return current_id = fact_list[0].Slots['current'] # Get the current UI state fact_list = clips.Eval('(find-all-facts ((?f UI-state)) (eq ?f:id %s))' % current_id) if len(fact_list) == 0: return valid_answers = fact_list[0].Slots['valid-answers'] if update.message.text in valid_answers: clips.Assert('(next %s %s)' % (current_id, update.message.text)) clips.Run() nextUIState(bot, update) elif update.message.text == emojize(':back: Previous', use_aliases=True): clips.Assert('(prev %s)' % current_id) clips.Run() nextUIState(bot, update) elif update.message.text == emojize(':repeat: Restart', use_aliases=True): new(bot, update) elif update.message.text == emojize(':x: Cancel', use_aliases=True): cancel(bot, update) def cancel(bot, update): """ Ends the chat with the beer expert when the command /cancel is issued. """ update.message.reply_text(text='Bye! I hope we can talk again some day. 👋🏻', reply_markup=ReplyKeyboardRemove()) clips.Reset() def unknown(bot, update): """ Sends an error message when an unrecognized command is typed. """ bot.send_message(chat_id=update.message.chat_id, text='Unrecognized command. Say what?') def error(bot, update, error): """ Log errors caused by updates. """ logger.warning('Update %s caused error %s' % (update, error)) if __name__ == '__main__': # Load the Beer EXpert system clips.Load('./clips/beerex.clp') token = open('token', 'r').read() # Create the updater and pass it the bot's token. updater = Updater(token) # Get the dispatcher to register handlers dispatcher = updater.dispatcher # Add command and message handlers dispatcher.add_handler(CommandHandler('start', start)) dispatcher.add_handler(CommandHandler('new', new)) dispatcher.add_handler(CommandHandler('cancel', cancel)) dispatcher.add_handler(MessageHandler(Filters.command, unknown)) # Log all errors dispatcher.add_error_handler(error) updater.start_webhook(listen="0.0.0.0", port=int(os.environ.get('PORT', '5000')), url_path=token) updater.bot.set_webhook("https://beerex-telegram-bot.herokuapp.com/" + token) # Start the bot # updater.start_polling() # Run the bot until you press Ctrl-C or the process receives SIGINT, # SIGTERM or SIGABRT. This should be used most of the time, since # start_polling() is non-blocking and will stop the bot gracefully. updater.idle()
Python
0
@@ -5651,18 +5651,16 @@ bot%0A - # updater
5ede219cd4613af0fecbf415030aaa23df1ff3ee
Add test for measurement order
test/core/measurements_test.py
test/core/measurements_test.py
# Copyright 2016 Google Inc. All Rights Reserved. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Test various measurements use cases. The test cases here need improvement - they should check for things that we actually care about. """ from examples import all_the_things import openhtf as htf from openhtf.util import test as htf_test # Fields that are considered 'volatile' for record comparison. _VOLATILE_FIELDS = {'start_time_millis', 'end_time_millis', 'timestamp_millis', 'lineno', 'codeinfo', 'code_info', 'descriptor_id'} class TestMeasurements(htf_test.TestCase): def test_unit_enforcement(self): """Creating a measurement with invalid units should raise.""" self.assertRaises(TypeError, htf.Measurement('bad_units').with_units, 1701) @htf_test.patch_plugs(user_mock='openhtf.plugs.user_input.UserInput') def test_chaining_in_measurement_declarations(self, user_mock): user_mock.prompt.return_value = 'mock_widget' record = yield all_the_things.hello_world self.assertNotMeasured(record, 'unset_meas') self.assertMeasured(record, 'widget_type', 'mock_widget') self.assertMeasured(record, 'widget_color', 'Black') self.assertMeasurementPass(record, 'widget_size') self.assertMeasurementPass(record, 'specified_as_args') @htf_test.yields_phases def test_measurements_with_dimenstions(self): record = yield all_the_things.dimensions self.assertNotMeasured(record, 'unset_dims') self.assertMeasured(record, 'dimensions', [(0, 1), (1, 2), (2, 4), (3, 8), (4, 16)]) self.assertMeasured(record, 'lots_of_dims', [(1, 21, 101, 123), (2, 22, 102, 126), (3, 23, 103, 129), (4, 24, 104, 132)]) @htf_test.yields_phases def test_validator_replacement(self): record = yield all_the_things.measures_with_args.with_args(min=2, max=4) self.assertMeasurementFail(record, 'replaced_min_only') self.assertMeasurementPass(record, 'replaced_max_only') self.assertMeasurementFail(record, 'replaced_min_max') record = yield all_the_things.measures_with_args.with_args(min=0, max=5) self.assertMeasurementPass(record, 'replaced_min_only') self.assertMeasurementPass(record, 'replaced_max_only') self.assertMeasurementPass(record, 'replaced_min_max') record = yield all_the_things.measures_with_args.with_args(min=-1, max=0) self.assertMeasurementPass(record, 'replaced_min_only') self.assertMeasurementFail(record, 'replaced_max_only') self.assertMeasurementFail(record, 'replaced_min_max')
Python
0
@@ -3064,28 +3064,485 @@ record, 'replaced_min_max')%0A +%0A @htf_test.yields_phases%0A def test_measurement_order(self):%0A record = yield all_the_things.dimensions%0A self.assertEqual(record.measurements.keys(),%0A %5B'unset_dims', 'dimensions', 'lots_of_dims'%5D)%0A record = yield all_the_things.measures_with_args.with_args(min=2, max=4)%0A self.assertEqual(record.measurements.keys(),%0A %5B'replaced_min_only', 'replaced_max_only',%0A 'replaced_min_max'%5D)%0A
bfe9e372a7f891a1537f37e383548612ce49fb9c
Change error message to get item_id + few fixes
lazyblacksmith/tasks/market_order.py
lazyblacksmith/tasks/market_order.py
# -*- encoding: utf-8 -*- import config import time from lazyblacksmith.extension.celery_app import celery_app from lazyblacksmith.models import ItemAdjustedPrice from lazyblacksmith.models import ItemPrice from lazyblacksmith.models import Region from lazyblacksmith.models import db from lazyblacksmith.utils.crestutils import get_all_items from lazyblacksmith.utils.crestutils import get_by_attr from lazyblacksmith.utils.crestutils import get_crest from lazyblacksmith.utils.pycrest.errors import APIException from requests.exceptions import Timeout import gevent from ratelimiter import RateLimiter from gevent import monkey monkey.patch_all() rate_limiter = RateLimiter(max_calls=config.CREST_REQ_RATE_LIM / 2, period=1) def crest_order_price(market_crest_url, type_url, item_id, region): """ Get and return the orders <type> (sell|buy) from a given region for a given type """ # call the crest page and extract all items from every pages if required try: buy_orders_crest = get_all_items(market_crest_url.marketBuyOrders(type=type_url)) sell_orders_crest = get_all_items(market_crest_url.marketSellOrders(type=type_url)) except APIException as api_e: if "503" in str(api_e): print "[%s] Error 503 happened !" % time.strftime("%x %X") else: print "%s Unexpected error : %s " % (time.strftime("%x %X"), api_e) return except Timeout: print "[%s] Error: timeout while getting price from crest !" % time.strftime("%x %X") return # if no orders, if not sell_orders_crest or not buy_orders_crest: return # extract min/max sell_price = min(sell_orders_crest, key=lambda order: order.price) buy_price = max(buy_orders_crest, key=lambda order: order.price) return { 'item_id': item_id, 'sell_price': sell_price, 'buy_price': buy_price, 'region_id': region.id, } @celery_app.task(name="schedule.update_market_price") def update_market_price(): """Celery task to upgrade prices through CREST""" crest = get_crest() item_type_url = crest.itemTypes.href region_list = Region.query.filter( Region.id.in_(config.CREST_REGION_PRICE) ).all() item_list = ItemAdjustedPrice.query.all() # number in pool is the max per second we want. greenlet_pool = [] raw_sql_query = """ INSERT INTO %s (item_id, region_id, sell_price, buy_price) VALUES (:item_id, :region_id, :sell_price, :buy_price) ON DUPLICATE KEY UPDATE sell_price = :sell_price, buy_price = :buy_price """ % ItemPrice.__tablename__ # loop over regions for region in region_list: market_crest = (get_by_attr(get_all_items(crest.regions()), 'name', region.name))() # donner market_crest en param # récup sell et buy # return dict avec tout dedans # recup la liste de tous les order # bulk insert. # loop over items for item in item_list: type_url = '%s%s/' % (item_type_url, item.item_id) # use rate limited contexte to prevent too much greenlet spawn per seconds with rate_limiter: # greenlet spawn buy order getter greenlet_pool.append(gevent.spawn( crest_order_price, market_crest, type_url, item.item_id, region )) gevent.joinall(greenlet_pool) results = [greenlet.value for greenlet in greenlet_pool] db.engine.execute( raw_sql_query, results ) greenlet_pool = []
Python
0
@@ -1279,20 +1279,33 @@ ppened ! + Item ID: %25s %22 %25 +( time.str @@ -1310,32 +1310,42 @@ trftime(%22%25x %25X%22) +, item_id) %0A else:%0A @@ -1381,18 +1381,27 @@ ed error +. Item ID: -: %25s %22 %25 @@ -1425,21 +1425,23 @@ x %25X%22), -api_e +item_id )%0A @@ -1448,16 +1448,21 @@ return + None %0A exc @@ -1541,20 +1541,33 @@ crest ! + Item ID: %25s %22 %25 +( time.str @@ -1580,16 +1580,26 @@ %22%25x %25X%22) +, item_id) %0A @@ -1597,32 +1597,37 @@ )%0A return + None %0A%0A # if no or @@ -1630,17 +1630,25 @@ o orders -, + found... %0A if @@ -1942,24 +1942,30 @@ : sell_price +.price ,%0A 'b @@ -1980,24 +1980,30 @@ ': buy_price +.price ,%0A 'r @@ -3672,16 +3672,34 @@ let_pool + if greenlet.value %5D%0A
ff6d9647b72c2101480f170e55ee28fd0cb37d11
add MME tag by default
xbrowse_server/base/management/commands/add_default_tags.py
xbrowse_server/base/management/commands/add_default_tags.py
import sys from optparse import make_option from xbrowse_server import xbrowse_controls from django.core.management.base import BaseCommand from xbrowse_server.base.models import Project, ProjectTag, Family def get_or_create_project_tag(project, tag_name, description, color='#1f78b4'): """ Gets or creates a particular ProjectTag in a given project. Args: project: The project that contains this tag tag_name: The name of the new tag (can contain spaces) (eg. "Causal Variant") description: (eg. "causal variant") Returns: new ProjectTag model object (or an existing one if a match was found) """ project_tag, created = ProjectTag.objects.get_or_create(project=project, tag=tag_name) if created: print("Created new tag: %s : %s" % (project, tag_name)) project_tag.title=description project_tag.color=color project_tag.save() class Command(BaseCommand): def add_arguments(self, parser): parser.add_argument('args', nargs='*') parser.add_argument('-p', '--print-tags', help="Print what tags are bieng used", action="store_true") def handle(self, *args, **options): if len(args) < 1: sys.exit("ERROR: must specify 1 or more project_ids on the command line") project_ids = args if options["print_tags"]: for project in Project.objects.all(): print("========") users = list(project.get_users()) if users and len(ProjectTag.objects.filter(project=project, tag='VUS')) == 0: print("##### " + project.project_id + " #### " + ",".join(map(str, users)) + ", " + ("%s families" % len(Family.objects.filter(project=project)))) for project_tag in ProjectTag.objects.filter(project=project): print(project_tag.tag + ": " + project_tag.title) for project_id in project_ids: project = Project.objects.get(project_id=project_id) get_or_create_project_tag(project, tag_name="Review", description="", color='#88CCDD') # blue get_or_create_project_tag(project, tag_name="Incidental Finding", description="", color='#FFAA33') get_or_create_project_tag(project, tag_name="Novel Gene", description="", color='#FF0000') # 4C0083 get_or_create_project_tag(project, tag_name="Known Gene Phenotype Expansion", description="", color='#5521CC') get_or_create_project_tag(project, tag_name="Known Gene for Phenotype", description="", color='#2177DD') get_or_create_project_tag(project, tag_name="Pathogenic", description="Potential candidate gene", color='#AA1111') # red get_or_create_project_tag(project, tag_name="Likely Pathogenic", description="Likely pathogenic", color='#FF9988') # light red get_or_create_project_tag(project, tag_name="VUS", description="Uncertain significance", color='#AAAAAA') # gray get_or_create_project_tag(project, tag_name="Likely Benign", description="Likely Benign", color='#B2DF8A') # light green get_or_create_project_tag(project, tag_name="Benign", description="Strong evidence", color='#11AA11') # green print("Done") """ Review Review Incidental Finding Incidental finding Known Gene for Phenotype Known gene for phenotype Known Gene Phenotype Expansion Known gene phenotype expansion Novel Gene Novel gene Pathogenic Potential candidate gene Likely Pathogenic Moderate and supporting evidence VUS Uncertain significance Likely Benign Moderate and supporting evidence Benign Strong evidence """
Python
0
@@ -3254,24 +3254,141 @@ 1') # green +%0A%0A get_or_create_project_tag(project, tag_name=%22MME%22, description=%22Match Maker Exchange%22, color='#ff7f00') %0A pri
66c33c880d1e5f20a23e01937f8c88f5b66bfc5c
fix SQL error on non existing column
addons/website_membership/models/membership.py
addons/website_membership/models/membership.py
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. from odoo import models class MembershipLine(models.Model): _inherit = 'membership.membership_line' def get_published_companies(self, limit=None): if not self.ids: return [] limit_clause = '' if limit is None else ' LIMIT %d' % limit self.env.cr.execute(""" SELECT DISTINCT p.id FROM res_partner p INNER JOIN membership_membership_line m ON p.id = m.partner WHERE website_published AND is_company AND m.id IN %s """ + limit_clause, (tuple(self.ids),)) return [partner_id[0] for partner_id in self.env.cr.fetchall()]
Python
0.000272
@@ -557,15 +557,10 @@ ERE -website +is _pub
d5cb2a37ea77b15c5725d6ebf8e0ab79f3bea613
Fix interface in historian service interface
flow_workflow/historian/service_interface.py
flow_workflow/historian/service_interface.py
import logging from flow_workflow.historian.messages import UpdateMessage LOG = logging.getLogger(__name__) class WorkflowHistorianServiceInterface(object): def __init__(self, broker=None, exchange=None, routing_key=None): self.broker = broker self.exchange = exchange self.routing_key = routing_key def update(self, net_key, operation_id, name, workflow_plan_id, **kwargs): if workflow_plan_id < 0: # ignore update (don't even make message) LOG.debug("Received negative workflow_plan_id:%s, " "ignoring update (net_key=%s, operation_id=%s, name=%s," "workflow_plan_id=%s, kwargs=%s)", workflow_plan_id, net_key, peration_id, name, workflow_plan_id, kwargs) else: LOG.debug("Sending update (net_key=%s, operation_id=%s, name=%s," "workflow_plan_id=%s, kwargs=%s)", net_key, peration_id, name, workflow_plan_id, kwargs) message = UpdateMessage(net_key=net_key, operation_id=operation_id, **kwargs) self.broker.publish(self.exchange, self.routing_key, message)
Python
0.000004
@@ -1002,32 +1002,33 @@ net_key, +o peration_id, nam @@ -1136,16 +1136,82 @@ ion_id,%0A + name=name, workflow_plan_id=workflow_plan_id,%0A
26f5adea28f81ebbe830d4a207958320e0b40520
update version
gtfparse/__init__.py
gtfparse/__init__.py
# Copyright (c) 2015. Mount Sinai School of Medicine # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .attribute_parsing import expand_attribute_strings from .create_missing_features import create_missing_features from .line_parsing import parse_gtf_lines from .required_columns import REQUIRED_COLUMNS from .parsing_error import ParsingError from .read_gtf import read_gtf_as_dataframe, read_gtf_as_dict __version__ = "0.2.2" __all__ = [ "expand_attribute_strings", "create_missing_features", "parse_gtf_lines", "REQUIRED_COLUMNS", "ParsingError", "read_gtf_as_dataframe", "read_gtf_as_dict", ]
Python
0
@@ -921,17 +921,17 @@ = %220.2. -2 +3 %22%0A%0A__all
85dc28b44def27658e282d621749598ec80ea420
Fix typo
ambari-server/src/main/python/TeardownAgent.py
ambari-server/src/main/python/TeardownAgent.py
#!/usr/bin/env python ''' Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements. See the NOTICE file distributed with this work for additional information regarding copyright ownership. The ASF licenses this file to you under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ''' import sys import logging import os import subprocess def exec_os_command(os_command): os_stat = subprocess.Popen(os_command, stdout=subprocess.PIPE) return { "exitstatus": os.stat.returncode, "log": os_stat.communicate(0) } def is_suse(): """Return true if the current OS is Suse Linux, false otherwise""" if os.path.isfile("/etc/issue"): if "suse" in open("/etc/issue").read().lower(): return True return False def teardown_agent_suse(): """ Run zypper remove""" zypper_command = ["zypper", "remove", "-y", "ambari-agent"] return exec_os_command(zypper_command)['exitstatus'] def teardown_agent(): """ Run yum remove""" rpm_command = ["yum", "-y", "remove", "ambari-agent"] return exec_os_command(rpm_command)['exitstatus'] def parse_args(argv): onlyargs = argv[1:] pass_phrase = onlyargs[0] hostname = onlyargs[1] project_version = None if len(onlyargs) > 2: project_version = onlyargs[2] if project_version is None or project_version == "null": project_version = "" if project_version != "": project_version = "-" + project_version return (pass_phrase, hostname, project_version) def main(argv=None): script_dir = os.path.realpath(os.path.dirname(argv[0])) (pass_phrase, hostname, project_version) = parse_args(argv) exec_os_command(["ambari-agent", "stop"]) exec_os_command(["ambari-agent", "unregister"]) if is_suse(): exit_code = teardown_agent_suse() else: exit_code = teardown_agent() sys.exit(exit_code) if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) main(sys.argv)
Python
0.999999
@@ -973,17 +973,17 @@ tus%22: os -. +_ stat.ret
31f55658d7495bf1fed8a5a466ffe54502a0348c
Make countersong check for language-dependent spells
tpdatasrc/tpgamefiles/scr/tpModifiers/countersong.py
tpdatasrc/tpgamefiles/scr/tpModifiers/countersong.py
from templeplus.pymod import PythonModifier from toee import * import tpdp def Remove(char, args, evt_obj): if evt_obj.is_modifier('Countersong'): args.condition_remove() return 0 countersong = PythonModifier() countersong.ExtendExisting('Countersong') countersong.AddHook(ET_OnConditionAddPre, EK_NONE, Remove, ())
Python
0
@@ -179,16 +179,463 @@ turn 0%0A%0A +# built-in hook only checks for Sonic descriptor%0Adef Lang(char, args, evt_obj):%0A%09lang = 1 %3C%3C (D20STD_F_SPELL_DESCRIPTOR_LANGUAGE_DEPENDENT-1)%0A%09sonic = 1 %3C%3C (D20S%25D_F_SPELL_DESCRIPTOR_SONIC-1)%0A%0A%09if (evt_obj.flags & lang) and not (evt_obj.flags & sonic):%0A%09%09perform = args.get_arg(1)%0A%09%09save_bonus = evt_obj.bonus_list.get_sum()%0A%09%09delta = perform - save_bonus - evt_obj.roll_result%0A%09%09if delta %3E 0:%0A%09%09%09evt_obj.bonus_list.add(delta, 0, 192)%0A%0A%09return 0%0A%0A counters @@ -762,8 +762,74 @@ ve, ())%0A +countersong.AddHook(ET_OnCountersongSaveThrow, EK_NONE, Lang, ())%0A
e535def2bc9b7de203e1fd37fc592cdeed1be526
fix selection bug
src/choose.py
src/choose.py
# Copyright (c) 2015-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the BSD-style license found in the # LICENSE file in the root directory of this source tree. An additional grant # of patent rights can be found in the PATENTS file in the same directory. # # @nolint from __future__ import print_function import curses import pickle import sys import os import output import screenControl import logger PICKLE_FILE = '~/.fbPager.pickle' SELECTION_PICKLE = '~/.fbPager.selection.pickle' LOAD_SELECTION_WARNING = ''' WARNING! Loading the standard input and previous selection failed. This is probably due to a backwards compatibility issue with upgrading PathPicker or an internal error. Please pipe a new set of input to PathPicker to start fresh (after which this error will go away) ''' def doProgram(stdscr): output.clearFile() logger.clearFile() lineObjs = getLineObjs() screen = screenControl.Controller(stdscr, lineObjs) screen.control() def getLineObjs(): filePath = os.path.expanduser(PICKLE_FILE) try: lineObjs = pickle.load(open(filePath, 'rb')) except: output.appendError(LOAD_SELECTION_WARNING) sys.exit(1) logger.addEvent('total_num_files', len(lineObjs.items())) selectionPath = os.path.expanduser(SELECTION_PICKLE) if os.path.isfile(selectionPath): setSelectionsFromPickle(lineObjs) matches = [lineObj for i, lineObj in lineObjs.items() if not lineObj.isSimple()] if not len(matches): output.writeToFile('echo "No lines matched!!"') sys.exit(0) return lineObjs def setSelectionsFromPickle(lineObjs): try: selectedIndices = pickle.load(open(selectionPath, 'rb')) except: output.appendError(LOAD_SELECTION_WARNING) sys.exit(1) for index in selectedIndices: if index >= len(lineObjs.items()): error = 'Found index %d more than total matches' % index output.appendError(error) continue toSelect = lineObjs[index] if isinstance(toSelect, format.LineMatch): lineObjs[index].setSelect(True) else: error = 'Line %d was selected but is not LineMatch' % index output.appendError(error) if __name__ == '__main__': if not os.path.exists(os.path.expanduser(PICKLE_FILE)): print('Nothing to do!') output.writeToFile('echo ":D"') sys.exit(0) output.clearFile() curses.wrapper(doProgram)
Python
0
@@ -437,16 +437,30 @@ t logger +%0Aimport format %0A%0APICKLE @@ -1404,32 +1404,47 @@ tionsFromPickle( +selectionPath, lineObjs)%0A%0A m @@ -1688,16 +1688,31 @@ mPickle( +selectionPath, lineObjs
ec7150144682afb1f64cd3ba3713207912820264
Remove add_station reference.
agent/manager.py
agent/manager.py
import json import logging import threading import time import pika from db.db import session from db.models import Metric, WeatherStation LOG_FORMAT = ('%(levelname) -10s %(asctime)s %(name) -30s %(funcName) ' '-35s %(lineno) -5d: %(message)s') LOGGER = logging.getLogger(__name__) class AgentManager(object): def __init__(self, cfg): self.connection = None self.channel = None self.cfg = cfg def connect(self, forever=True): while True: try: parameters = pika.ConnectionParameters(host=str(self.cfg['broker']), port=int(self.cfg['port']), virtual_host='/') self.connection = pika.BlockingConnection(parameters) self.channel = self.connection.channel() self.fan_out() # Perodic. break except Exception as e: if not forever: raise LOGGER.error("Connection failed. Trying again...") time.sleep(1) continue def stop(self): if self.connection: self.connection.close() def _publish(self, msg): try: published = self.channel.basic_publish(self.cfg['exchange'], self.cfg['routingKey'], msg, pika.BasicProperties( content_type='application/json', delivery_mode=2), # Persistent, mandatory=True ) if not published: raise except Exception as e: LOGGER.error('Error %s when sending message.', str(e)) raise def publish_station(self, station): msg_dict = { "action": "add_station", "data": { "id": station.id, "name": station.name, "latitude": station.latitude, "longitude": station.longitude, "metric_types": [mt.id for mt in station.metric_types], } } msg = json.dumps(msg_dict) self._publish(msg) def publish_metric(self, metric): msg_dict = { "action": "add_metric", "data": { "id": metric.id, "value": metric.value, "metric_type_id": metric.metric_type_id, "weather_station_id": metric.weather_station_id, } } msg = json.dumps(msg_dict) self._publish(msg) def fan_out(self, period=30): LOGGER.debug('Fanning out rows...') stations = session.query(WeatherStation).filter_by(is_sent=False).all() for station in stations: session.begin() try: self.publish_station(station) station.is_sent = True session.commit() except Exception as e: LOGGER.error('Error %s when processing station.', str(e)) session.rollback() raise metrics = session.query(Metric).filter_by(is_sent=False).all() for metric in metrics: session.begin() try: self.publish_metric(metric) metric.is_sent = True session.commit() except Exception as e: LOGGER.error('Error %s when processing metric.', str(e)) session.rollback() raise threading.Timer(period, self.fan_out).start() # Periodic loop. def run(self): self.add_station() self.connect() def main(): logging.basicConfig(level=logging.DEBUG, format=LOG_FORMAT) cfg = json.load(open("config.json")) manager = AgentManager(cfg) try: manager.run() except KeyboardInterrupt: manager.stop() if __name__ == '__main__': main()
Python
0
@@ -3796,35 +3796,8 @@ f):%0A - self.add_station()%0A
a6372a8be985ef630e960d8dab5a4de02f12739a
fix indentation
testbeam_analysis/testing/test_examples.py
testbeam_analysis/testing/test_examples.py
''' Script to check that the examples run. The example data is reduced at the beginning to safe time. ''' import os import unittest import mock from shutil import copyfile import tables as tb import testbeam_analysis from testbeam_analysis.tools import data_selection from testbeam_analysis.examples import (eutelescope, fei4_telescope, simulated_data) # Get the absoulte path of the online_monitor installation testing_path = os.path.dirname(__file__) package_path = os.path.dirname(testbeam_analysis.__file__) script_folder = os.path.abspath(os.path.join(package_path, r'examples/')) fixture_folder = os.path.abspath(os.path.join(os.path.dirname( os.path.realpath(testing_path)) + r'/testing/fixtures/examples/')) tests_data_folder = os.path.abspath( os.path.join(os.path.realpath(script_folder), r'data/')) def copy_alignment(path, out_folder, **kwarg): try: os.mkdir(os.path.join(tests_data_folder, out_folder)) except OSError: pass copyfile(os.path.join(fixture_folder, path), os.path.join(tests_data_folder, os.path.join(out_folder, 'Alignment.h5'))) class TestExamples(unittest.TestCase): @classmethod def setUpClass(cls): # Virtual X server for plots under headless LINUX travis testing needed if os.getenv('TRAVIS', False): from xvfbwrapper import Xvfb cls.vdisplay = Xvfb() cls.vdisplay.start() cls.output_folder = tests_data_folder # Reduce the example data to make it possible to test the examples in # CI environments cls.examples_fei4_hit_files = [os.path.join( cls.output_folder, r'TestBeamData_FEI4_DUT0.h5'), os.path.join( cls.output_folder, r'TestBeamData_FEI4_DUT1.h5'), os.path.join( cls.output_folder, r'TestBeamData_FEI4_DUT4.h5'), os.path.join( cls.output_folder, r'TestBeamData_FEI4_DUT5.h5')] data_selection.reduce_hit_files( cls.examples_fei4_hit_files, fraction=100) cls.examples_mimosa_hit_files = [os.path.join( cls.output_folder, r'TestBeamData_Mimosa26_DUT%d.h5') % i for i in range(6)] data_selection.reduce_hit_files( cls.examples_mimosa_hit_files, fraction=100) # Remove old files and rename reduced files for file_name in cls.examples_fei4_hit_files: os.remove(file_name) os.rename(os.path.splitext(file_name)[0] + '_reduced.h5', file_name) for file_name in cls.examples_mimosa_hit_files: os.remove(file_name) os.rename(os.path.splitext(file_name)[0] + '_reduced.h5', file_name) # Alignments do not converge for reduced data set # Thus mock out the alignment steps @mock.patch('testbeam_analysis.dut_alignment.prealignment', side_effect=copy_alignment( path=r'eutelescope/Alignment.h5', out_folder=r'output_eutel') ) @mock.patch('testbeam_analysis.dut_alignment.alignment') # TODO: Analysis fails, to be checked why @mock.patch('testbeam_analysis.result_analysis.calculate_residuals') def test_mimosa_example(self, m1, m2, m3): eutelescope.run_analysis() # Prealignment does not converge for reduced data set # Thus mock out the prealignment @mock.patch('testbeam_analysis.dut_alignment.prealignment', side_effect=copy_alignment( path=r'fei4_telescope/Alignment.h5', out_folder=r'output_fei4'), ) def test_fei4_example(self, mock): fei4_telescope.run_analysis() def test_simulated_data_example(self): ''' Check the example and the overall analysis that a efficiency of about 100% is reached. 
Not a perfect 100% is expected due to the finite propability that tracks are merged since > 2 tracks per event are simulated ''' simulated_data.run_analysis(1000) with tb.open_file('simulation/Efficiency.h5') as in_file_h5: for i in range(5): # Loop over DUT index eff = in_file_h5.get_node('/DUT_%d/Efficiency' % i)[:] mask = in_file_h5.get_node('/DUT_%d/Efficiency_mask' % i)[:] self.assertAlmostEqual(eff[~mask].mean(), 100., delta=0.0001) if __name__ == '__main__': suite = unittest.TestLoader().loadTestsFromTestCase(TestExamples) unittest.TextTestRunner(verbosity=2).run(suite)
Python
0.000008
@@ -4543,16 +4543,17 @@ .0001)%0A%0A +%0A if __nam
01a574cc5ed872df93b70795916acae978e4b86f
Make TripleStatsHandler return just 3 URLs (in order) to its entity charts.
marry-fuck-kill/triple_handlers.py
marry-fuck-kill/triple_handlers.py
#!/usr/bin/env python # # Copyright 2010 Hunter Freyer and Michael Kelly # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import logging import urllib from google.appengine.ext import db from google.appengine.ext import webapp from google.appengine.ext.webapp import util import models import utils class TripleCreationHandler(webapp.RequestHandler): def get(self): self.response.out.write(""" <h1>Create a Triple!</h1> <form method="post"> One: name=<input type="text" name="n1"></input> url=<input type="text" name="u1"></input> q=<input type="text" name="q1"></input><br/> Two: name=<input type="text" name="n2"></input> url=<input type="text" name="u2"></input> q=<input type="text" name="q2"></input><br/> Three: name=<input type="text" name="n3"></input> url=<input type="text" name="u3"></input> q=<input type="text" name="q3"></input><br/> <input type="submit"></input> </form> """) def post(self): try: triple = TripleCreationHandler.MakeTriple(self.request) # TODO(mjkelly): restrict this later when we have some idea of what # we'll throw. Or perhaps not? except models.EntityValidationError, e: logging.info("Error creating triple from req: %s", self.request) self.response.out.write('error: %s' % e) return # Success # TODO(mjkelly): stop using meta refresh redirects logging.info("Success creating triple from req: %s", self.request) self.response.out.write('ok: created %s' % triple.key().name()) @staticmethod def MakeTriple(request): """Create the named triple. We expect the following request params: n[1-3]: the 3 triple display names u[1-3]: the 3 triple image URLs q[1-3]: the search string used to find u[1-3] The only non-obvious part of this is that we check that q[1-3] actually include u[1-3]. This is to prevent users from adding any URL they please. """ # Grab all the URL params at once. entities = [{'n': request.get('n1'), 'u': request.get('u1'), 'q': request.get('q1')}, {'n': request.get('n2'), 'u': request.get('u2'), 'q': request.get('q2')}, {'n': request.get('n3'), 'u': request.get('u3'), 'q': request.get('q3')}] for i in range(len(entities)): for k in ['n', 'u', 'q']: if not entities[i][k]: raise ValueError("Entity %s missing attribute '%s'" % (i, k)) # This may raise a URLError or EntityValidatationError. one = models.PutEntity(entities[0]['n'], entities[0]['u'], entities[0]['q']) two = models.PutEntity(entities[1]['n'], entities[1]['u'], entities[0]['q']) three = models.PutEntity(entities[2]['n'], entities[2]['u'], entities[0]['q']) # This may raise an EntityValidationError. 
models.Triple.validate(one, two, three) return models.PutTriple(one=one, two=two, three=three) class TripleJsonHandler(webapp.RequestHandler): def post(self, unused_id): try: triple = TripleCreationHandler.MakeTriple(self.request) except ValueError, e: self.response.out.write('error:%s' % e) return except models.EntityValidationError, e: self.response.out.write('error:%s' % e) return self.response.out.write('ok:%s' % str(triple.key())) class TripleStatsHandler(webapp.RequestHandler): def get(self, triple_id): if triple_id: t = models.Triple.get(urllib.unquote(triple_id)) self.response.out.write('%s: %r' % (triple_id, t)) entities = [t.one, t.two, t.three] for e in entities: self.response.out.write('<h2>%s</h2>' % e) m = e.assignment_reference_marry_set.count() f = e.assignment_reference_fuck_set.count() k = e.assignment_reference_kill_set.count() self.response.out.write("<p>m=%d, f=%d, k=%d</p>" % (m, f, k)) else: keys = [t.key() for t in models.Triple.all()] self.response.out.write(""" <h1>All Triples (%d):</h1> <ul> %s </ul> """ % (len(keys), ''.join(['<li><a href="%s">%s</li>\n' % (k, k) for k in keys])))
Python
0
@@ -3977,16 +3977,20 @@ %0A if +not triple_i @@ -3990,24 +3990,70 @@ riple_id:%0A + raise Exception(%22Need triple key%22)%0A %0A t = mode @@ -4101,631 +4101,192 @@ - self.response.out.write('%25s: %25r' %25 (triple_id, t))%0A entities = %5Bt.one, t.two, t.three%5D%0A%0A for e in entities:%0A self.response.out.write('%3Ch2%3E%25s%3C/h2%3E' %25 e)%0A m = e.assignment_reference_marry_set.count()%0A f = e.assignment_reference_fuck_set.count()%0A k = e.assignment_reference_kill_set.count()%0A self.response.out.write(%22%3Cp%3Em=%25d, f=%25d, k=%25d%3C/p%3E%22 %25 (m, f, k))%0A else:%0A keys = %5Bt.key() for t in models.Triple.all()%5D%0A%0A self.response.out.write(%22%22%22%0A%3Ch1%3EAll Triples (%25d):%3C/h1%3E%0A%3Cul%3E%0A%25s%0A%3C/ul%3E%0A%22%22%22 %25 (len(keys),%0A ''.join(%5B'%3Cli%3E%3Ca href=%22%25s%22%3E%25s%3C/li%3E%5Cn' %25 (k, k) for k in keys%5D)) +entities = %5Bt.one, t.two, t.three%5D%0A%0A self.response.headers%5B'Content-Type'%5D = %22text/plain%22;%0A for e in %5Bt.one, t.two, t.three%5D:%0A self.response.out.write(e.get_stats_url() + '%5Cn' )%0A
0a44fc07efb902912e22e72979f69fbab200cd32
Update version 0.6.8 -> 0.6.9
dimod/package_info.py
dimod/package_info.py
__version__ = '0.6.8' __author__ = 'D-Wave Systems Inc.' __authoremail__ = '[email protected]' __description__ = 'A shared API for binary quadratic model samplers.'
Python
0
@@ -16,9 +16,9 @@ 0.6. -8 +9 '%0A__
81cbe1730702343d62dcf7585f3387291644b17c
fix reference to private modules, use absolute imports
neuroglancer_scripts/chunk_encoding.py
neuroglancer_scripts/chunk_encoding.py
# Copyright (c) 2018 Forschungszentrum Juelich GmbH # Author: Yann Leprince <[email protected]> # # This software is made available under the MIT licence, see LICENCE.txt. import numpy as np def get_encoder(info, scale_info, encoder_params={}): data_type = info["data_type"] num_channels = info["num_channels"] encoding = scale_info["encoding"] if encoding == "raw": return RawChunkEncoder(data_type, num_channels) elif encoding == "compressed_segmentation": block_size = scale_info["compressed_segmentation_block_size"] return CompressedSegmentationEncoder(data_type, num_channels, block_size) elif encoding == "jpeg": # TODO properly handle missing params jpeg_quality = encoder_params["jpeg_quality"] jpeg_plane = encoder_params["jpeg_plane"] return JpegChunkEncoder(data_type, num_channels, jpeg_quality, jpeg_plane) else: return RuntimeError("Invalid encoding") # TODO appropriate error type? def add_argparse_options(parser, allow_lossy): if allow_lossy: group = parser.add_argument_group("Options for JPEG compression") group.add_argument("--jpeg-quality", type=int, default=95, metavar="Q", help="JPEG quality factor (from 0 to 100, values " "above 95 provide little extra quality but " "increase file size)") group.add_argument("--jpeg-plane", choices=("xy", "xz"), default="xy", help='plane of JPEG compression (default: xy)') class IncompatibleEncoderError(Exception): pass class ChunkEncoder: def __init__(self, data_type, num_channels): assert num_channels > 0 self.num_channels = num_channels self.dtype = np.dtype(data_type).newbyteorder("<") class RawChunkEncoder(ChunkEncoder): lossy = False already_compressed = False def encode(self, chunk): assert chunk.dtype == self.dtype assert chunk.ndim == 4 assert chunk.shape[0] == self.num_channels buf = chunk.tobytes() return buf def decode(self, buf, chunk_size): return np.frombuffer(buf, dtype=self.dtype).reshape( (self.num_channels, chunk_size[2], chunk_size[1], chunk_size[0])) class CompressedSegmentationEncoder(ChunkEncoder): lossy = False already_compressed = False def __init__(self, data_type, num_channels, block_size): if data_type not in ("uint32", "uint64"): raise IncompatibleEncoderError( "compressed_segmentation encoding can only handle uint32 or " "uint64 data_type") super().__init__(data_type, num_channels) self.block_size = block_size def encode(self, chunk): from . import compressed_segmentation assert chunk.dtype == self.dtype assert chunk.ndim == 4 assert chunk.shape[0] == self.num_channels buf = compressed_segmentation.encode_chunk(chunk, self.block_size) return buf def decode(self, buf, chunk_size): from . import compressed_segmentation chunk = np.empty( (self.num_channels, chunk_size[2], chunk_size[1], chunk_size[0]), dtype=self.dtype ) compressed_segmentation.decode_chunk_into(chunk, buf, self.block_size) return chunk class JpegChunkEncoder(ChunkEncoder): lossy = False already_compressed = True def __init__(self, data_type, num_channels, jpeg_quality, jpeg_plane): if data_type != "uint8" or num_channels not in (1, 3): raise IncompatibleEncoderError( "JPEG encoding can only handle uint8 data_type with 1 or 3 " "channels") super().__init__(data_type, num_channels) self.jpeg_quality = jpeg_quality self.jpeg_plane = jpeg_plane def encode(self, chunk): from . import jpeg assert chunk.dtype == self.dtype assert chunk.ndim == 4 assert chunk.shape[0] == self.num_channels buf = jpeg.encode_chunk(chunk, self.jpeg_quality, self.jpeg_plane) return buf def decode(self, buf, chunk_size): from . 
import jpeg return jpeg.decode_chunk(buf, chunk_size, self.num_channels)
Python
0
@@ -2872,33 +2872,52 @@ from -. +neuroglancer_scripts import compressed_s @@ -2896,32 +2896,33 @@ _scripts import +_ compressed_segme @@ -3066,16 +3066,17 @@ buf = +_ compress @@ -3200,25 +3200,44 @@ from -. +neuroglancer_scripts import compress @@ -3228,16 +3228,17 @@ import +_ compress @@ -3404,16 +3404,17 @@ +_ compress @@ -4040,33 +4040,52 @@ from -. +neuroglancer_scripts import jpeg%0A @@ -4064,32 +4064,33 @@ _scripts import +_ jpeg%0A ass @@ -4215,16 +4215,17 @@ buf = +_ jpeg.enc @@ -4353,17 +4353,36 @@ rom -. +neuroglancer_scripts import jpeg @@ -4377,16 +4377,17 @@ import +_ jpeg%0A @@ -4398,16 +4398,17 @@ return +_ jpeg.dec
9724f382b448948a0ce5ce9c5da935a46fa02a1a
Version bump
edx_proctoring/__init__.py
edx_proctoring/__init__.py
""" The exam proctoring subsystem for the Open edX platform. """ # Be sure to update the version number in edx_proctoring/package.json __version__ = '2.4.5' default_app_config = 'edx_proctoring.apps.EdxProctoringConfig' # pylint: disable=invalid-name
Python
0.000001
@@ -152,9 +152,9 @@ 2.4. -5 +6 '%0A%0Ad
6d86e8565a9ea1aac07b8a1470e2f3b724b981c2
fix for use on python 2.1
Lib/bsddb/test/test_misc.py
Lib/bsddb/test/test_misc.py
"""Miscellaneous bsddb module test cases """ import os import sys import unittest try: # For Python 2.3 from bsddb import db, dbshelve except ImportError: # For earlier Pythons w/distutils pybsddb from bsddb3 import db, dbshelve from test.test_support import verbose #---------------------------------------------------------------------- class MiscTestCase(unittest.TestCase): def setUp(self): self.filename = self.__class__.__name__ + '.db' homeDir = os.path.join(os.path.dirname(sys.argv[0]), 'db_home') self.homeDir = homeDir try: os.mkdir(homeDir) except OSError: pass def tearDown(self): try: os.remove(self.filename) except OSError: pass import glob files = glob.glob(os.path.join(self.homeDir, '*')) for file in files: os.remove(file) def test01_badpointer(self): dbs = dbshelve.open(self.filename) dbs.close() self.assertRaises(db.DBError, dbs.get, "foo") def test02_db_home(self): env = db.DBEnv() # check for crash fixed when db_home is used before open() assert env.db_home is None env.open(self.homeDir, db.DB_CREATE) assert self.homeDir == env.db_home #---------------------------------------------------------------------- def test_suite(): return unittest.makeSuite(MiscTestCase) if __name__ == '__main__': unittest.main(defaultTest='test_suite')
Python
0.000002
@@ -245,47 +245,8 @@ ve%0A%0A -from test.test_support import verbose%0A%0A #---
d7b8186f0f4115307753d0aef038ec61155c83bc
Fix typo in python
Test/Test-IO/python/TestIO.py
Test/Test-IO/python/TestIO.py
#!/usr/bin/python import timeit, sys, io def wrapper(func, *args, **kwargs): def wrapped(): return func(*args, **kwargs) return wrapped def start(file, outfile): #input = open(file, 'r') #output = open(outfile, 'w') line = input.readline() while line: line = line.replace('Tellus', 'Terra') line = line.replace('tellus', 'terra') output.write(line) line = input.readline() def main(argv): file = 'dump.txt' output = 'res.txt' for i in range(len(argv)): if argv[i] == '-f': i = i + 1 file = argv[i] elif argv[i] == '-o': i = i + 1 output = argv[i] #ns = time.time() wrapped = wrapper(start, file, output) print (timeit.timeit(wrapped, number=1)*1000) #totaltime = (time.time() - ns) / 1000000 #print (totaltime) sys.exit(0) if __name__ == '__main__':main(sys.argv[1:])
Python
0.999998
@@ -178,17 +178,16 @@ e):%0A -# input = @@ -206,17 +206,16 @@ r')%0A -# output =
ecb9ee29e2d934c9a53ed26860c3fa4327c62464
Add type "completion" in field.FIELDS
elasticsearch_dsl/field.py
elasticsearch_dsl/field.py
from datetime import date from dateutil import parser from .utils import DslBase, _make_dsl_class, ObjectBase, AttrDict __all__ = ['construct_field', 'Object', 'Nested', 'Date', 'String', 'Float', 'Double', 'Byte', 'Short', 'Integer', 'Long', 'Boolean', 'Ip', 'Attachment', 'GeoPoint', 'GeoShape', ] def construct_field(name_or_field, **params): # {"type": "string", "index": "not_analyzed"} if isinstance(name_or_field, dict): if params: raise ValueError('construct_field() cannot accept parameters when passing in a dict.') params = name_or_field.copy() if 'type' not in params: # inner object can be implicitly defined if 'properties' in params: name = 'object' else: raise ValueError('construct_field() needs to have a "type" key.') else: name = params.pop('type') return Field.get_dsl_class(name)(**params) # String() if isinstance(name_or_field, Field): if params: raise ValueError('construct_field() cannot accept parameters when passing in a construct_field object.') return name_or_field # "string", index="not_analyzed" return Field.get_dsl_class(name_or_field)(**params) class Field(DslBase): _type_name = 'field' _type_shortcut = staticmethod(construct_field) # all fields can be multifields _param_defs = {'fields': {'type': 'field', 'hash': True}} name = None def _to_python(self, data): return data def to_python(self, data): if isinstance(data, list): data[:] = map(self._to_python, data) return data return self._to_python(data) def to_dict(self): d = super(Field, self).to_dict() name, value = d.popitem() value['type'] = name return value class InnerObjectWrapper(ObjectBase): def __init__(self, mapping, **kwargs): # mimic DocType behavior with _doc_type.mapping super(AttrDict, self).__setattr__('_doc_type', type('Meta', (), {'mapping': mapping})) super(InnerObjectWrapper, self).__init__(**kwargs) class InnerObject(object): " Common functionality for nested and object fields. " _doc_class = InnerObjectWrapper _param_defs = {'properties': {'type': 'field', 'hash': True}} def property(self, name, *args, **kwargs): self.properties[name] = construct_field(*args, **kwargs) return self def empty(self): return {} def update(self, other_object): if not hasattr(other_object, 'properties'): # not an inner/nested object, no merge possible return our, other = self.properties, other_object.properties for name in other: if name in our: if hasattr(our[name], 'update'): our[name].update(other[name]) continue our[name] = other[name] def _to_python(self, data): # don't wrap already wrapped data if isinstance(data, self._doc_class): return data if isinstance(data, list): data[:] = list(map(self._to_python, data)) return data return self._doc_class(self.properties, **data) class Object(InnerObject, Field): name = 'object' class Nested(InnerObject, Field): name = 'nested' def empty(self): return [] class Date(Field): name = 'date' def _to_python(self, data): if isinstance(data, date): return data try: # TODO: add format awareness return parser.parse(data) except (TypeError, ValueError): raise #XXX FIELDS = ( 'string', 'float', 'double', 'byte', 'short', 'integer', 'long', 'boolean', 'ip', 'attachment', 'geo_point', 'geo_shape', ) # generate the query classes dynamicaly for f in FIELDS: fclass = _make_dsl_class(Field, f) globals()[fclass.__name__] = fclass __all__.append(fclass.__name__)
Python
0.000432
@@ -3904,16 +3904,34 @@ shape',%0A + 'completion',%0A )%0A%0A# gen
a9b27bc7c3821536657405790f38532db473b92c
Fix bug in recent shelves views
books/views/shelf_views.py
books/views/shelf_views.py
from datetime import datetime, timedelta from django.core.urlresolvers import reverse from django.contrib import messages from django.shortcuts import ( get_object_or_404, redirect, ) from django.views.generic import ( CreateView, DeleteView, DetailView, ListView, UpdateView, View, ) from books.models import ( Book, BookOnShelf, Shelf, ) from books.views import ( SEARCH_UPDATE_MESSAGE, LibraryMixin, ) from readers.models import Reader class ShelfListView(LibraryMixin, ListView): model = Shelf template_name = "shelf_list.html" class ShelfView(LibraryMixin, DetailView): model = Shelf template_name = "shelf.html" def get_context_data(self, **kwargs): context = super(ShelfView, self).get_context_data(**kwargs) context['books'] = [ book_on_shelf.book for book_on_shelf in BookOnShelf.objects.filter( shelf=self.get_object() ) ] return context class CreateShelfView(LibraryMixin, CreateView): model = Shelf template_name = "shelf_edit.html" fields = ['name',] def get_success_url(self): return reverse('shelf-list') def get_context_data(self, **kwargs): context = super(CreateShelfView, self).get_context_data(**kwargs) context['action'] = reverse('shelf-create') return context class EditShelfView(LibraryMixin, UpdateView): model = Shelf template_name = "shelf_edit.html" fields = ['name',] def get_success_url(self): return reverse( 'shelf-detail', kwargs={'pk': self.object.id} ) def get_context_data(self, **kwargs): context = super(EditShelfView, self).get_context_data(**kwargs) context['action'] = reverse( 'shelf-edit', kwargs={'pk': self.object.id}, ) return context def form_valid(self, form): messages.success(self.request, "{} updated. {}".format( self.object, SEARCH_UPDATE_MESSAGE )) return super(EditShelfView, self).form_valid(form) class DeleteShelfView(LibraryMixin, DeleteView): model = Shelf template_name = "shelf_delete.html" def get_success_url(self): return reverse('shelf-list') def form_valid(self, form): messages.success(self.request, "{} deleted. {}".format( self.object, SEARCH_UPDATE_MESSAGE )) return super(DeleteShelfView, self).form_valid(form) class LastWeekShelfView(LibraryMixin, ListView): model = Book template_name = "shelf.html" paginate_by = 25 paginate_orphans = 5 def get_queryset(self): queryset = super(LastWeekShelfView, self).get_queryset() last_week = datetime.now() - timedelta(days=7) queryset.filter(created__gt=last_week) return queryset def get_context_data(self, **kwargs): context = super(LastWeekShelfView, self).get_context_data(**kwargs) context['shelf_name'] = 'Added in Last Week' context['books'] = context['object_list'] return context class LastMonthShelfView(LibraryMixin, ListView): model = Book template_name = "shelf.html" paginate_by = 25 paginate_orphans = 5 def get_queryset(self): queryset = super(LastMonthShelfView, self).get_queryset() last_week = datetime.now() - timedelta(days=30) queryset.filter(created__gt=last_week) return queryset def get_context_data(self, **kwargs): context = super(LastMonthShelfView, self).get_context_data(**kwargs) context['shelf_name'] = 'Added in Last Month' context['books'] = context['object_list'] return context
Python
0
@@ -17,18 +17,8 @@ port - datetime, tim @@ -24,17 +24,16 @@ medelta%0A -%0A from dja @@ -175,16 +175,50 @@ rect,%0A)%0A +from django.utils import timezone%0A from dja @@ -398,25 +398,24 @@ Shelf,%0A)%0A -%0A from books.v @@ -473,19 +473,18 @@ yMixin,%0A + )%0A -%0A from rea @@ -2792,39 +2792,39 @@ last_week = -datetim +timezon e.now() - timede @@ -2835,32 +2835,39 @@ days=7)%0A +return queryset.filter( @@ -2892,32 +2892,8 @@ eek) -%0A return queryset %0A%0A @@ -3395,22 +3395,23 @@ ast_ -week = datetim +month = timezon e.no @@ -3435,32 +3435,39 @@ ays=30)%0A +return queryset.filter( @@ -3487,37 +3487,14 @@ ast_ -week)%0A return queryset +month) %0A%0A
7997dc0785f124dd3836bc8490c701fe99217a48
add test mode param
umapi/api.py
umapi/api.py
import requests import json from error import UMAPIError, UMAPIRetryError, UMAPIRequestError, ActionFormatError class UMAPI(object): def __init__(self, endpoint, auth): self.endpoint = str(endpoint) self.auth = auth def users(self, org_id, page=0): return self._call('/users/%s/%d' % (org_id, page), requests.get) def groups(self, org_id, page=0): return self._call('/groups/%s/%d' % (org_id, page), requests.get) def action(self, org_id, action): if not isinstance(action, Action): if not isinstance(action, str) and hasattr(action, "__getitem__") or hasattr(action, "__iter__"): actions = [a.data for a in action] else: raise ActionFormatError("action must be iterable, indexable or Action object") else: actions = [action.data] return self._call('/action/%s' % org_id, requests.post, actions) def _call(self, method, call, params=None): data = '' if params: data = json.dumps(params) res = call(self.endpoint+method, data=data, auth=self.auth) if res.status_code == 200: result = res.json() if "result" in result: if result["result"] == "error": raise UMAPIRequestError(result["errors"][0]["errorCode"]) else: return result else: raise UMAPIRequestError("Request Error -- Unknown Result Status") if res.status_code in [429, 502, 503, 504]: raise UMAPIRetryError(res) else: raise UMAPIError(res) class Action(object): def __init__(self, user, *args, **kwargs): self.data = {"user": user} for k, v in kwargs.items(): self.data[k] = v def do(self, *args, **kwargs): self.data["do"] = [] # add "create" / "add" / "removeFrom" first for k, v in kwargs.items(): if k.startswith("create") or k.startswith("addAdobe") or k.startswith("removeFrom"): self.data["do"].append({k: v}) del kwargs[k] # now do the other actions for k, v in kwargs.items(): if k in ['add', 'remove']: self.data["do"].append({k: {"product": v}}) else: self.data["do"].append({k: v}) return self
Python
0.000001
@@ -165,16 +165,33 @@ nt, auth +, test_mode=False ):%0A @@ -247,16 +247,51 @@ h = auth +%0A self.test_mode = test_mode %0A%0A de @@ -913,16 +913,152 @@ n.data%5D%0A + if self.test_mode:%0A return self._call('/action/%25s?testOnly=true' %25 org_id, requests.post, actions)%0A else:%0A
b19429159f3c813297ba2e237abba276045f9ff1
add 0.10.17, mariadb-connector-c dependency (#11044)
var/spack/repos/builtin/packages/r-rmysql/package.py
var/spack/repos/builtin/packages/r-rmysql/package.py
# Copyright 2013-2019 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class RRmysql(RPackage): """Implements 'DBI' Interface to 'MySQL' and 'MariaDB' Databases.""" homepage = "https://github.com/rstats-db/rmysql" url = "https://cran.r-project.org/src/contrib/RMySQL_0.10.9.tar.gz" list_url = "https://cran.r-project.org/src/contrib/Archive/RMySQL" version('0.10.9', '3628200a1864ac3005cfd55cc7cde17a') depends_on('r-dbi', type=('build', 'run')) depends_on('mariadb')
Python
0
@@ -514,16 +514,114 @@ MySQL%22%0A%0A + version('0.10.17', sha256='754df4fce159078c1682ef34fc96aa5ae30981dc91f4f2bada8d1018537255f5')%0A vers @@ -692,16 +692,21 @@ n('r-dbi [email protected]: ', type= @@ -746,11 +746,19 @@ 'mariadb +@:5.5.56 ')%0A
564f1da2c6643a4ef6d27b736620116b144fa2ac
Handle stale PostgreSQL (or others) more gracefully. Closes #3394. Thanks to flfr at stibo.com for the patch.
trac/db/pool.py
trac/db/pool.py
# -*- coding: utf-8 -*- # # Copyright (C) 2005 Edgewall Software # Copyright (C) 2005 Christopher Lenz <[email protected]> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at http://trac.edgewall.com/license.html. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at http://projects.edgewall.com/trac/. # # Author: Christopher Lenz <[email protected]> try: import threading except ImportError: import dummy_threading as threading threading._get_ident = lambda: 0 import time from trac.db.util import ConnectionWrapper class TimeoutError(Exception): """Exception raised by the connection pool when no connection has become available after a given timeout.""" class PooledConnection(ConnectionWrapper): """A database connection that can be pooled. When closed, it gets returned to the pool. """ def __init__(self, pool, cnx): ConnectionWrapper.__init__(self, cnx) self._pool = pool def close(self): if self.cnx: self._pool._return_cnx(self.cnx) self.cnx = None def __del__(self): self.close() class ConnectionPool(object): """A very simple connection pool implementation.""" def __init__(self, maxsize, connector, **kwargs): self._dormant = [] # inactive connections in pool self._active = {} # active connections by thread ID self._available = threading.Condition(threading.Lock()) self._maxsize = maxsize # maximum pool size self._cursize = 0 # current pool size, includes active connections self._connector = connector self._kwargs = kwargs def get_cnx(self, timeout=None): start = time.time() self._available.acquire() try: tid = threading._get_ident() if tid in self._active: self._active[tid][0] += 1 return PooledConnection(self, self._active[tid][1]) while True: if self._dormant: cnx = self._dormant.pop() break elif self._maxsize and self._cursize < self._maxsize: cnx = self._connector.get_connection(**self._kwargs) self._cursize += 1 break else: if timeout: self._available.wait(timeout) if (time.time() - start) >= timeout: raise TimeoutError, 'Unable to get database ' \ 'connection within %d seconds' \ % timeout else: self._available.wait() self._active[tid] = [1, cnx] return PooledConnection(self, cnx) finally: self._available.release() def _return_cnx(self, cnx): self._available.acquire() try: tid = threading._get_ident() if tid in self._active: num, cnx_ = self._active.get(tid) assert cnx is cnx_ if num > 1: self._active[tid][0] = num - 1 else: del self._active[tid] if cnx not in self._dormant: cnx.rollback() if cnx.poolable: self._dormant.append(cnx) else: self._cursize -= 1 self._available.notify() finally: self._available.release() def shutdown(self): self._available.acquire() try: for cnx in self._dormant: cnx.cnx.close() finally: self._available.release()
Python
0.000002
@@ -2266,37 +2266,217 @@ -break +try:%0A cnx.cursor() # check whether the connection is stale%0A break%0A except Exception:%0A cnx.close() %0A
bcff742c27904f995d9f5e8a184f0348b58139a5
fix closing bracket
{{cookiecutter.repo_name}}/fabfile.py
{{cookiecutter.repo_name}}/fabfile.py
# -*- coding: utf-8 -*- import os import datetime from contextlib import contextmanager from fabric.api import env, run, local, prefix, sudo def live(): """Connects to the server.""" env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')] env.user = 'freshmilk' env.cwd = '/var/www/{{cookiecutter.domain_name}}' env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd) def beta(): """Connects to beta/testing server""" env.hosts = [os.environ.get('{{cookiecutter.repo_name}}_host')] env.user = 'freshmilk' env.cwd = '/var/www/beta.{{cookiecutter.domain_name}}' env.connect_to = '{0}@{1}:{2}'.format(env.user, env.hosts[0], env.cwd) def gitpull(tag=None): """Pulls upstream branch on the server.""" if tag is not None: run('git pull') run('git checkout %s' % tag) else: run('git pull') @contextmanager def source_env(): """Actives embedded virtual env""" with prefix('source env/bin/activate'): yield def collectstatic(): """Collect static files on server.""" with source_env(): run('python manage.py collectstatic') def migrate(): """Sync project database on server.""" with source_env(): run('python manage.py migrate') def touch(): """Touch the wsgi file.""" run('touch {{cookiecutter.repo_name}}/wsgi.py') def update(tag=None): """ Runs gitpull, develop, collectstatic, migrate and touch. """ gitpull() collectstatic() migrate() touch() def dump(): with source_env(): run('python manage.py sqldump' def sync_media(): local('rsync -avzh -e ssh %s/media/* media/' % env.connect_to) def sync_dump(): local('rsync -avPhzL -e ssh %s/var/dump.sql.gz var' % env.connect_to) def mirror(): """Runs dump, sync_media, sync_dump and sqlimport.""" dump() sync_dump() local('python manage.py sqlimport') sync_media()
Python
0.000001
@@ -1676,16 +1676,17 @@ sqldump' +) %0D%0A%0D%0A%0D%0Ade
36625021ce3d4f7203601c3fcb8a95f091d1470a
Update uc only if package is included in RDO release
update-uc.py
update-uc.py
#!/usr/bin/env python # # Update OpenStack Oslo and Clients libraries versions in rdoinfo from: # * master branch (default) # curl -OJ http://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=master # * stable/newton # curl -OJ http://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt?h=stable/newton # USAGE # update-uc.py [branch] # If branch is not specified, master i.e. currently ocata-uc is assumed. import copy import ruamel.yaml as yaml import sys RDO = 'rdo.yml' SOURCE_BRANCH = 'source-branch' UC = 'upper-constraints.txt' if len(sys.argv) > 1: UC_RELEASE = sys.argv[1] else: UC_RELEASE = 'ocata-uc' # filter for Oslo and clients def filter_oslo_clients(project): return project.startswith('oslo') or \ project.endswith('client') or \ project == 'osc-lib' def filter_all(project): return True def filter_all_minus_tripleo(project): TRIPLEO_PROJECTS = [ 'diskimage-builder', 'os-apply-config', 'os-cloud-config', 'os-collect-config', 'os-net-config', 'os-refresh-config', 'tripleo-common', 'mistral', 'tempest', ] return project not in TRIPLEO_PROJECTS # load and filter upper-constraints.txt # normalize project name for rdoinfo def load_uc(projects_filter): uc = {} with open(UC, 'rb') as ucfile: for line in ucfile.readlines(): name, version_spec = line.rstrip().split('===') if name and projects_filter(name): version = version_spec.split(';')[0] if version: if name.startswith('python-'): name = name[7:] uc[name.replace('.', '-')] = version return uc def update_uc(): # uc = load_uc(filter_oslo_clients) uc = load_uc(filter_all_minus_tripleo) uc_projects = uc.keys() with open(RDO, 'rb') as infile: info = yaml.load(infile, Loader=yaml.RoundTripLoader) DEFAULT_RELEASES = info['package-default']['tags'] RELEASES_PUPPET = info['package-configs']['rpmfactory-puppet']['tags'] for pkg in info['packages']: project = pkg['project'] if project in uc_projects: new_version = uc[project] # "Setting %s to version %s" % (project, new_version) if 'tags' in pkg: tags = pkg['tags'] if 'version-locked' in tags or 'under-review' in tags: print("Not updating %s, it is version-locked or under" " review" % project) continue prev_version = tags.get(UC_RELEASE) if prev_version: prev_version = prev_version.get(SOURCE_BRANCH) else: if project.startswith('puppet'): tags = copy.copy(RELEASES_PUPPET) else: tags = copy.copy(DEFAULT_RELEASES) prev_version = None tags[UC_RELEASE] = {SOURCE_BRANCH: new_version} if prev_version: if prev_version != new_version: print("%s updated from %s to %s" % (project, prev_version, new_version)) else: print("%s %s already up to date" % (project, new_version)) else: print("%s first time pin to %s" % (project, new_version)) pkg['tags'] = tags uc_projects.remove(project) else: # "%s not found in upper-constraints" % project pass # "Projects not in rdoinfo: %s" % string.join(uc_projects, ' ') with open(RDO, 'w') as outfile: outfile.write(yaml.dump(info, Dumper=yaml.RoundTripDumper, indent=2)) if __name__ == '__main__': update_uc()
Python
0.000012
@@ -3004,16 +3004,212 @@ = None%0A + if UC_RELEASE not in pkg%5B'tags'%5D:%0A print(%22Not updating %25s, it is not included in release %25s%22%0A %25 (project, UC_RELEASE))%0A continue%0A
645c640f38ae67008eb18c79301e19ddfd39c041
use new valgrind repo (#8538)
var/spack/repos/builtin/packages/valgrind/package.py
var/spack/repos/builtin/packages/valgrind/package.py
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * import sys class Valgrind(AutotoolsPackage): """An instrumentation framework for building dynamic analysis. There are Valgrind tools that can automatically detect many memory management and threading bugs, and profile your programs in detail. You can also use Valgrind to build new tools. Valgrind is Open Source / Free Software, and is freely available under the GNU General Public License, version 2. """ homepage = "http://valgrind.org/" url = "https://sourceware.org/pub/valgrind/valgrind-3.13.0.tar.bz2" version('3.13.0', '817dd08f1e8a66336b9ff206400a5369') version('3.12.0', '6eb03c0c10ea917013a7622e483d61bb') version('3.11.0', '4ea62074da73ae82e0162d6550d3f129') version('3.10.1', '60ddae962bc79e7c95cfc4667245707f') version('3.10.0', '7c311a72a20388aceced1aa5573ce970') version('develop', svn='svn://svn.valgrind.org/valgrind/trunk') variant('mpi', default=True, description='Activates MPI support for valgrind') variant('boost', default=True, description='Activates boost support for valgrind') depends_on('mpi', when='+mpi') depends_on('boost', when='+boost') depends_on("autoconf", type='build', when='@develop') depends_on("automake", type='build', when='@develop') depends_on("libtool", type='build', when='@develop') # Apply the patch suggested here: # http://valgrind.10908.n7.nabble.com/Unable-to-compile-on-Mac-OS-X-10-11-td57237.html patch('valgrind_3_12_0_osx.patch', when='@3.12.0 platform=darwin') def configure_args(self): spec = self.spec options = [] if not (spec.satisfies('%clang') and sys.platform == 'darwin'): # Otherwise with (Apple's) clang there is a linker error: # clang: error: unknown argument: '-static-libubsan' options.append('--enable-ubsan') if sys.platform == 'darwin': options.extend([ '--build=amd64-darwin', '--enable-only64bit' ]) return options
Python
0
@@ -2116,44 +2116,46 @@ p', -svn='svn://svn.valgrind +git='git://sourceware .org/ +git/ valgrind /tru @@ -2154,14 +2154,12 @@ rind -/trunk +.git ')%0A%0A
cc4ad5a2e8dc70bb66f80306bd2408d32f492608
make tests pass
ansible/modules/hashivault/hashivault_write.py
ansible/modules/hashivault/hashivault_write.py
#!/usr/bin/env python import warnings from hvac.exceptions import InvalidPath from ansible.module_utils.hashivault import hashivault_argspec from ansible.module_utils.hashivault import hashivault_auth_client from ansible.module_utils.hashivault import hashivault_init from ansible.module_utils.hashivault import hashiwrapper ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'community', 'version': '1.1'} DOCUMENTATION = ''' --- module: hashivault_write version_added: "0.1" short_description: Hashicorp Vault write module description: - Module to write to Hashicorp Vault. options: url: description: - url for vault default: to environment variable VAULT_ADDR ca_cert: description: - "path to a PEM-encoded CA cert file to use to verify the Vault server TLS certificate" default: to environment variable VAULT_CACERT ca_path: description: - "path to a directory of PEM-encoded CA cert files to verify the Vault server TLS certificate : if ca_cert is specified, its value will take precedence" default: to environment variable VAULT_CAPATH client_cert: description: - "path to a PEM-encoded client certificate for TLS authentication to the Vault server" default: to environment variable VAULT_CLIENT_CERT client_key: description: - "path to an unencrypted PEM-encoded private key matching the client certificate" default: to environment variable VAULT_CLIENT_KEY verify: description: - "if set, do not verify presented TLS certificate before communicating with Vault server : setting this variable is not recommended except during testing" default: to environment variable VAULT_SKIP_VERIFY authtype: description: - "authentication type to use: token, userpass, github, ldap, approle" default: token token: description: - token for vault default: to environment variable VAULT_TOKEN username: description: - username to login to vault. default: to environment variable VAULT_USER password: description: - password to login to vault. default: to environment variable VAULT_PASSWORD version: description: - version of the kv engine (int) default: 1 mount_point: description: - secret mount point default: secret secret: description: - vault secret to write. data: description: - Keys and values to write. update: description: - Update rather than overwrite. 
default: False ''' EXAMPLES = ''' --- - hosts: localhost tasks: - hashivault_write: secret: giant data: foo: foe fie: fum ''' def main(): argspec = hashivault_argspec() argspec['version'] = dict(required=False, type='int', default=1) argspec['mount_point'] = dict(required=False, type='str', default='secret') argspec['secret'] = dict(required=True, type='str') argspec['update'] = dict(required=False, default=False, type='bool') argspec['data'] = dict(required=False, default={}, type='dict') module = hashivault_init(argspec, supports_check_mode=True) result = hashivault_write(module) if result.get('failed'): module.fail_json(**result) else: module.exit_json(**result) def _convert_to_seconds(original_value): try: value = str(original_value) seconds = 0 if 'h' in value: ray = value.split('h') seconds = int(ray.pop(0)) * 3600 value = ''.join(ray) if 'm' in value: ray = value.split('m') seconds += int(ray.pop(0)) * 60 value = ''.join(ray) if value: ray = value.split('s') seconds += int(ray.pop(0)) return seconds except Exception: pass return original_value def hashivault_changed(old_data, new_data): if sorted(old_data.keys()) != sorted(new_data.keys()): return True for key in old_data: old_value = old_data[key] new_value = new_data[key] if old_value == new_value: continue if key != 'ttl' and key != 'max_ttl': return True old_value = _convert_to_seconds(old_value) new_value = _convert_to_seconds(new_value) if old_value != new_value: return True return False @hashiwrapper def hashivault_write(module): result = {"changed": False, "rc": 0} params = module.params client = hashivault_auth_client(params) version = params.get('version') mount_point = params.get('mount_point') secret = params.get('secret') data = params.get('data') if secret.startswith('/'): secret = secret.lstrip('/') mount_point = '' if mount_point: secret_path = '%s/%s' % (mount_point, secret) else: secret_path = secret with warnings.catch_warnings(): warnings.simplefilter("ignore") changed = True write_data = data if params.get('update') or module.check_mode: # Do not move these reads outside of the update read_data = None try: if version == 2: read_data = client.secrets.kv.v2.read_secret_version(secret, mount_point=mount_point) else: read_data = client.read(secret_path) or {} except InvalidPath: read_data = None except Exception as e: result['rc'] = 1 result['failed'] = True error_string = "%s(%s)" % (e.__class__.__name__, e) result['msg'] = u"Error %s reading %s" % (error_string, secret_path) return result if not read_data: read_data = {} read_data = read_data['data']['data'] write_data = dict(read_data) write_data.update(data) result['write_data'] = write_data result['read_data'] = read_data changed = hashivault_changed(read_data, write_data) if changed: if not module.check_mode: try: if version == 2: returned_data = client.secrets.kv.v2.create_or_update_secret(mount_point=mount_point, path=secret, secret=write_data) else: returned_data = client.write(secret_path, **write_data) if returned_data: result['data'] = returned_data if returned_data is None: result['data'] = '' except Exception as e: result['rc'] = 1 result['failed'] = True error_string = "%s(%s)" % (e.__class__.__name__, e) result['msg'] = u"Error %s writing %s" % (error_string, secret_path) return result result['msg'] = u"Secret %s written" % secret_path result['changed'] = changed return result if __name__ == '__main__': main()
Python
0.000003
@@ -6092,16 +6092,116 @@ ead_data +.get('data', %7B%7D)%0A %0A if version == 2:%0A read_data = read_data.get %5B'data'%5D
88a028663b7688af362a2ebd5c168aaccc5695c0
Comment updates
bravado/mapping/request.py
bravado/mapping/request.py
from bravado.mapping.operation import log from bravado.mapping.param import unmarshal_param class RequestLike(object): """ Define a common interface for bravado to interface with server side request objects. Subclasses are responsible for providing attrs for __required_attrs__. """ __required_attrs__ = [ 'path', # dict of URL path parameters 'params', # dict of parameters from the query string and request body. 'headers', # dict of request headers ] def __getattr__(self, name): """ When an attempt to access a required attribute that doesn't exist is made, let the caller know that the type is non-compliant in its attempt to be `RequestList`. This is in place of the usual throwing of an AttributeError. Reminder: __getattr___ is only called when it has already been determined that this object does not have the given attr. :raises: NotImplementedError when the subclass has not provided access to a required attribute. """ if name in self.__required_attrs__: raise NotImplementedError( 'This RequestLike type {0} forgot to implement an attr ' 'for `{1}`'.format(type(self), name)) raise AttributeError( "'{0}' object has no attribute '{1}'".format(type(self), name)) def json(self, **kwargs): """ :return: request content in a json-like form :rtype: int, float, double, string, unicode, list, dict """ raise NotImplementedError("Implement json() in {0}".format(type(self))) def unmarshal_request(request, op): """Unmarshal Swagger request parameters from the passed in request like object. :type request: :class: `bravado.mapping.request.RequestLike`. :type op: :class:`bravado.mapping.operation.Operation` :returns: dict where (key, value) = (param_name, param_value) """ request_data = {} for param_name, param in op.params.iteritems(): param_value = unmarshal_param(param, request) request_data[param_name] = param_value log.debug("Swagger request_data: {0}".format(request_data)) return request_data
Python
0
@@ -130,18 +130,9 @@ -Define a c +C ommo @@ -151,34 +151,8 @@ for -bravado to interface with serv @@ -158,20 +158,16 @@ ver side -%0A request
f0dda4f875c13947d47cf91a58e9a834a5e4a92c
Fix flapping demo geo_location test (#37516)
tests/components/demo/test_geo_location.py
tests/components/demo/test_geo_location.py
"""The tests for the demo platform.""" import unittest from homeassistant.components import geo_location from homeassistant.components.demo.geo_location import ( DEFAULT_UPDATE_INTERVAL, NUMBER_OF_DEMO_DEVICES, ) from homeassistant.const import LENGTH_KILOMETERS from homeassistant.setup import setup_component import homeassistant.util.dt as dt_util from tests.async_mock import patch from tests.common import ( assert_setup_component, fire_time_changed, get_test_home_assistant, ) CONFIG = {geo_location.DOMAIN: [{"platform": "demo"}]} class TestDemoPlatform(unittest.TestCase): """Test the demo platform.""" def setUp(self): """Initialize values for this testcase class.""" self.hass = get_test_home_assistant() self.addCleanup(self.hass.stop) def test_setup_platform(self): """Test setup of demo platform via configuration.""" utcnow = dt_util.utcnow() # Patching 'utcnow' to gain more control over the timed update. with patch("homeassistant.util.dt.utcnow", return_value=utcnow): with assert_setup_component(1, geo_location.DOMAIN): assert setup_component(self.hass, geo_location.DOMAIN, CONFIG) self.hass.block_till_done() # In this test, one zone and geolocation entities have been # generated. all_states = [ self.hass.states.get(entity_id) for entity_id in self.hass.states.entity_ids(geo_location.DOMAIN) ] assert len(all_states) == NUMBER_OF_DEMO_DEVICES for state in all_states: # Check a single device's attributes. if state.domain != geo_location.DOMAIN: # ignore home zone state continue assert ( abs(state.attributes["latitude"] - self.hass.config.latitude) < 1.0 ) assert ( abs(state.attributes["longitude"] - self.hass.config.longitude) < 1.0 ) assert state.attributes["unit_of_measurement"] == LENGTH_KILOMETERS # Update (replaces 1 device). fire_time_changed(self.hass, utcnow + DEFAULT_UPDATE_INTERVAL) self.hass.block_till_done() # Get all states again, ensure that the number of states is still # the same, but the lists are different. all_states_updated = [ self.hass.states.get(entity_id) for entity_id in self.hass.states.entity_ids(geo_location.DOMAIN) ] assert len(all_states_updated) == NUMBER_OF_DEMO_DEVICES assert all_states != all_states_updated
Python
0
@@ -49,16 +49,31 @@ ittest%0A%0A +import pytest%0A%0A from hom @@ -571,16 +571,152 @@ o%22%7D%5D%7D%0A%0A%0A [email protected](autouse=True)%0Adef mock_legacy_time(legacy_patchable_time):%0A %22%22%22Make time patchable for all the tests.%22%22%22%0A yield%0A%0A%0A class Te
27a5d095ddacf848bef9190b90856350c31fc85f
Remove print
seq2seq/decoders/attention_decoder.py
seq2seq/decoders/attention_decoder.py
""" A basic sequence decoder that performs a softmax based on the RNN state. """ from collections import namedtuple import tensorflow as tf from seq2seq.decoders import DecoderBase, DecoderOutput, DecoderStepOutput class AttentionDecoderOutput( namedtuple("DecoderOutput", ["logits", "predictions", "attention_scores"])): """Augmented decoder output that also includes the attention scores. """ pass class AttentionDecoder(DecoderBase): """An RNN Decoder that uses attention over an input sequence. Args: cell: An instance of ` tf.nn.rnn_cell.RNNCell` vocab_size: Output vocabulary size, i.e. number of units in the softmax layer attention_inputs: The sequence to take attentio over. A tensor of shaoe `[B, T, ...]`. attention_fn: The attention function to use. This function map from `(state, inputs)` to `(attention_scores, attention_context)`. For an example, see `seq2seq.decoder.attention.AttentionLayer`. max_decode_length: Maximum length for decoding steps for each example of shape `[B]`. prediction_fn: Optional. A function that generates a predictions of shape `[B]` from a logits of shape `[B, vocab_size]`. By default, this is argmax. """ def __init__(self, cell, vocab_size, attention_inputs, attention_fn, max_decode_length, prediction_fn=None, name="attention_decoder"): super(AttentionDecoder, self).__init__(cell, max_decode_length, name) self.vocab_size = vocab_size self.prediction_fn = prediction_fn self.attention_inputs = attention_inputs self.attention_fn = attention_fn # By default, choose the highest logit score as the prediction if not prediction_fn: self.prediction_fn = lambda logits: tf.stop_gradient(tf.argmax(logits, 1)) @staticmethod def _pack_outputs(outputs_ta, final_loop_state): logits, predictions = DecoderBase._pack_outputs(outputs_ta, final_loop_state) attention_scores = tf.transpose(final_loop_state.pack(), [1, 0, 2]) return AttentionDecoderOutput(logits, predictions, attention_scores) def _step(self, time_, cell_output, cell_state, loop_state, next_input_fn): initial_call = (cell_output is None) if initial_call: cell_output = tf.zeros( [tf.shape(self.attention_inputs)[0], self.cell.output_size]) # Initialize the TensorArray that will hold the attention scores next_loop_state = tf.TensorArray( dtype=tf.float32, size=1, dynamic_size=True) # Compute attention att_scores, attention_context = self.attention_fn(cell_output, self.attention_inputs) # Transform attention context. 
# This makes the softmax smaller and allows us to synthesize information # between decoder state and attention context # see https://arxiv.org/abs/1508.04025v5 attention_context = tf.contrib.layers.fully_connected( inputs=tf.concat(1, [cell_output, attention_context]), num_outputs=self.cell.output_size, activation_fn=tf.nn.tanh, scope="attention_transform") # In the first step the attention vector is set to all zeros if initial_call: attention_context = tf.zeros_like(attention_context) else: next_loop_state = loop_state.write(time_ - 1, att_scores) # Softmax computation softmax_input = attention_context logits = tf.contrib.layers.fully_connected( inputs=softmax_input, num_outputs=self.vocab_size, activation_fn=None, scope="logits") predictions = self.prediction_fn(logits) outputs = DecoderOutput(logits, predictions) if initial_call: outputs = DecoderOutput( logits=tf.zeros([self.vocab_size]), predictions=tf.zeros( [], dtype=tf.int64)) # Append the attention context to the inputs next_input = next_input_fn(time_, (None if initial_call else cell_output), cell_state, loop_state, outputs) print(next_input) next_input = tf.concat(1, [next_input, attention_context]) return DecoderStepOutput( outputs=outputs, next_input=next_input, next_cell_state=cell_state, next_loop_state=next_loop_state)
Python
0.000016
@@ -4156,30 +4156,8 @@ ts)%0A - print(next_input)%0A
f6912851795e116771d1f82cd79999a3f32b81e3
remove unused import
niworkflows/reports/tests/test_core.py
niworkflows/reports/tests/test_core.py
''' Testing module for niworkflows.reports.core ''' import os from pathlib import Path from pkg_resources import resource_filename import tempfile from itertools import product from pkg_resources import resource_filename as pkgrf import matplotlib.pyplot as plt from bids.layout.writing import build_path import pytest from ..core import Report @pytest.fixture() def bids_sessions(tmpdir_factory): f, _ = plt.subplots() svg_dir = tmpdir_factory.mktemp('work') / 'fmriprep' svg_dir.ensure_dir() pattern = ( "sub-{subject}[/ses-{session}]/{datatype<anat|func>}/" "sub-{subject}[_ses-{session}][_task-{task}][_acq-{acquisition}]" "[_ce-{contrast}][_dir-{direction}][_rec-{reconstruction}]" "[_mod-{modality}][_run-{run}][_echo-{echo}][_space-{space}]" "[_desc-{desc}]_{suffix<dseg|T1w|bold>}.{extension<svg>}" ) subjects = ['01'] tasks = ['t1', 't2', 't3'] runs = ['01', '02', None] descs = ['aroma', 'bbregister', 'carpetplot', 'rois'] # create functional data for both sessions ses1_combos = product(subjects, ['1'], tasks, runs, descs) ses2_combos = product(subjects, ['2'], tasks, [None], descs) # have no runs in the second session (ex: dmriprep test data) # https://github.com/nipreps/dmriprep/pull/59 all_combos = list(ses1_combos) + list(ses2_combos) for subject, session, task, run, desc in all_combos: entities = { 'subject': subject, 'session': session, 'task': task, 'run': run, 'desc': desc, 'extension': 'svg', 'suffix': 'bold', 'datatype': 'func' } bids_path = build_path(entities, pattern) file_path = svg_dir / bids_path file_path.ensure() f.savefig(str(file_path)) # create anatomical data anat_opts = [ {'desc': 'brain'}, {'desc': 'conform'}, {'desc': 'reconall'}, {'desc': 'rois'}, {'suffix': 'dseg'}, {'space': 'MNI152NLin6Asym'}, {'space': 'MNI152NLin2009cAsym'}, ] anat_combos = product(subjects, anat_opts) for subject, anat_opt in anat_combos: anat_entities = { "subject": subject, "datatype": 'anat', "suffix": 't1w' } anat_entities.update(**anat_opt) bids_path = build_path(entities, pattern) file_path = svg_dir / bids_path file_path.ensure() f.savefig(str(file_path)) return svg_dir.dirname @pytest.fixture() def test_report1(): test_data_path = resource_filename( 'niworkflows', os.path.join('data', 'tests', 'work', 'reportlets')) out_dir = tempfile.mkdtemp() return Report(Path(test_data_path), Path(out_dir), 'fakeiuud', subject_id='01', packagename='fmriprep') @pytest.fixture() def test_report2(bids_sessions): out_dir = tempfile.mkdtemp() return Report(Path(bids_sessions), Path(out_dir), 'fakeiuud', subject_id='01', packagename='fmriprep') @pytest.mark.parametrize( "orderings,expected_entities,expected_value_combos", [ (['session', 'task', 'run'], ['task', 'run'], [ ('faketask', None), ('faketask2', None), ('faketaskwithruns', 1), ('faketaskwithruns', 2), ('mixedgamblestask', 1), ('mixedgamblestask', 2), ('mixedgamblestask', 3), ]), (['run', 'task', 'session'], ['run', 'task'], [ (None, 'faketask'), (None, 'faketask2'), (1, 'faketaskwithruns'), (1, 'mixedgamblestask'), (2, 'faketaskwithruns'), (2, 'mixedgamblestask'), (3, 'mixedgamblestask'), ]), ([''], [], []), (['session'], [], []), ([], [], []), (['madeupentity'], [], []), ] ) def test_process_orderings_small(test_report1, orderings, expected_entities, expected_value_combos): report = test_report1 report.init_layout() entities, value_combos = report._process_orderings(orderings, report.layout) assert entities == expected_entities assert expected_value_combos == value_combos @pytest.mark.parametrize( 
"orderings,expected_entities,first_value_combo,last_value_combo", [ (['session', 'task', 'run'], ['session', 'task', 'run'], ('1', 't1', None), ('2', 't3', None), ), (['run', 'task', 'session'], ['run', 'task', 'session'], (None, 't1', '1'), (2, 't3', '1'), ), ([''], [], None, None), (['session'], ['session'], ('1',), ('2',)), ([], [], None, None), (['madeupentity'], [], None, None), ] ) def test_process_orderings_large(test_report2, orderings, expected_entities, first_value_combo, last_value_combo): report = test_report2 report.init_layout() entities, value_combos = report._process_orderings(orderings, report.layout) if not value_combos: value_combos = [None] assert entities == expected_entities assert value_combos[0] == first_value_combo assert value_combos[-1] == last_value_combo
Python
0.000001
@@ -175,60 +175,8 @@ uct%0A -from pkg_resources import resource_filename as pkgrf %0A%0Aim
29c11f115f63007bceb3abe018ae2300fef39265
fix writing remainder construct after object modification
distance/construct.py
distance/construct.py
"""Facilities for defining fragments with the construct module.""" from construct import ( PascalString, VarInt, Bytes, ConstructError, Const, Select, FocusedSeq, Tell, Mapping, Rebuild, Computed, Compiled, Container, this, len_, ) from distance.base import Fragment from distance.bytes import SKIP_BYTES class C(object): """Provides cons useful for distance .bytes files.""" from construct import ( Struct as struct, Default as default, Byte as byte, Int32sl as int, Int32ul as uint, Int64sl as long, Int64ul as ulong, Float32l as float, Float64l as double, ) str = PascalString(VarInt, encoding='utf-16le') def optional(subcon, otherwise=None): return Select( Mapping(Const(SKIP_BYTES), {otherwise: SKIP_BYTES}), subcon) remainder = FocusedSeq( 'rem', 'pos' / Tell, 'size' / Rebuild(Computed(this._._.sec.content_end - this.pos), len_(this.rem)), 'rem' / Bytes(this.size), ) def _get_subcons(con): try: return con.subcons except AttributeError: pass try: return _get_subcons(con.subcon) except AttributeError: pass if isinstance(con, Compiled): return _get_subcons(con.defersubcon) raise AttributeError(f"could not get subcons of {con}") class ConstructMeta(type): def __init__(cls, name, bases, dct): super().__init__(name, bases, dct) if cls._construct is not None: attrs = {} for con in _get_subcons(cls._construct): if con.name: attrs[con.name] = getattr(con, 'value', None) cls._fields_map = attrs ExposeConstructFields(cls, getattr(cls, '_exposed_fields', None)) class BaseConstructFragment(Fragment, metaclass=ConstructMeta): """Baseclass for fragments defined by construct Structs. Subclasses need to override the `_construct` attribute with the Struct that defines the fragment. """ __slots__ = ('data',) # to be overridden by subclasses _construct = None def _init_defaults(self): self.data = Container() def _clone_data(self, new): new.data = Container(self.data) def _read_section_data(self, dbytes, sec): if sec.content_size: try: self.data = self._construct.parse_stream(dbytes.file, sec=sec) except ConstructError as e: raise ValueError from e else: # Data is empty - game falls back to defaults here. self.data = Container() def _write_section_data(self, dbytes, sec): # If data is empty, game falls back to defaults. if self.data: self._construct.build_stream(self.data, dbytes.file, sec=sec) def _print_data(self, p): super()._print_data(p) if 'allprops' in p.flags: with p.tree_children(): for k, v in self.data.items(): if k != '_io': # construct internal? p.tree_next_child() p(f"Field: {k} = {v!r}") def construct_property(cls, name, doc=None): if doc is None: doc = f"property forwarded to construct field {name!r}" def fget(self): try: return self.data[name] except KeyError as e: try: return cls._fields_map[name] except KeyError: pass raise AssertionError from e def fset(self, value): self.data[name] = value def fdel(self): try: del self.data[name] except KeyError as e: raise AssertionError from e return property(fget, fset, fdel, doc=doc) def ExposeConstructFields(target=None, only=None): """Decorator to expose construct fields as attributes.""" def decorate(target): if only is None: names = (c.name for c in _get_subcons(target._construct) if c.name) else: names = [only] if isinstance(only, str) else only for name in names: setattr(target, name, construct_property(target, name)) return target if target is None: return decorate return decorate(target) # vim:set sw=4 ts=8 sts=4 et:
Python
0.000002
@@ -118,16 +118,29 @@ , Bytes, + GreedyBytes, %0A Con @@ -206,25 +206,18 @@ ng, -Rebuild, Computed +IfThenElse ,%0A @@ -256,14 +256,8 @@ his, - len_, %0A)%0A%0A @@ -956,32 +956,73 @@ ' -size' / Rebuild(Computed +rem' / IfThenElse(this._parsing,%0A Bytes (thi @@ -1059,34 +1059,36 @@ os), - len_(this.rem)),%0A +%0A 'rem @@ -1087,31 +1087,19 @@ -'rem' / Bytes(this.size +GreedyBytes ),%0A
9a7edf9de8de22055462a3cab99ffc62aef36591
remove print
ArangoExec.py
ArangoExec.py
import sublime, sublime_plugin, http.client, socket, types, threading, time, json selectedIndexOptions = -1 class Options: def __init__(self, name): self.name = name connections = sublime.load_settings("ArangoExec.sublime-settings").get('connections') self.host = connections[self.name]['host'] self.port = connections[self.name]['port'] self.username = connections[self.name]['username'] self.password = connections[self.name]['password'] self.database = connections[self.name]['database'] if 'service' in connections[self.name]: self.service = connections[self.name]['service'] def __str__(self): return self.name @staticmethod def list(): names = [] connections = sublime.load_settings("ArangoExec.sublime-settings").get('connections') for connection in connections: names.append(connection) names.sort() return names class Command(): FILE_TYPE_HTML = "html" FILE_TYPE_JSON = "json" FILE_TYPE_XML = "xml" MAX_BYTES_BUFFER_SIZE = 8192 HTML_CHARSET_HEADER = "CHARSET" htmlCharset = "utf-8" def explain(self, query): requestObject = { 'query' : query } urlPart = "/_api/explain" self._execute(requestObject, urlPart) def execute(self, query): requestObject = { 'query' : query, 'count' : True, 'batchSize' :100 } urlPart = "/_api/cursor" self._execute(requestObject, urlPart) def _execute(self, requestObject, urlPart): global selectedIndexOptions if selectedIndexOptions == -1 : selectedIndexOptions = 0 names = Options.list() options = Options(names[selectedIndexOptions]) host = options.host port = options.port timeoutValue = 500 request_page = "/_db/"+ options.database + urlPart requestPOSTBody = json.dumps(requestObject) requestType = "POST" print(request_page) try: # if not(useProxy): #if httpProtocol == self.HTTP_URL: conn = http.client.HTTPConnection(host, port, timeout=timeoutValue) # else: # if len(clientSSLCertificateFile) > 0 or len(clientSSLKeyFile) > 0: # print "Using client SSL certificate: ", clientSSLCertificateFile # print "Using client SSL key file: ", clientSSLKeyFile # conn = httplib.HTTPSConnection( # url, port, timeout=timeoutValue, cert_file=clientSSLCertificateFile, key_file=clientSSLKeyFile) # else: # conn = httplib.HTTPSConnection(url, port, timeout=timeoutValue) conn.request(requestType, request_page, requestPOSTBody) # else: # print "Using proxy: ", proxyURL + ":" + str(proxyPort) # conn = httplib.HTTPConnection(proxyURL, proxyPort, timeout=timeoutValue) # conn.request(requestType, httpProtocol + url + request_page, requestPOSTBody) startReqTime = time.time() resp = conn.getresponse() endReqTime = time.time() startDownloadTime = time.time() (respHeaderText, respBodyText, fileType) = self.getParsedResponse(resp) endDownloadTime = time.time() latencyTimeMilisec = int((endReqTime - startReqTime) * 1000) downloadTimeMilisec = int((endDownloadTime - startDownloadTime) * 1000) respText = self.getResponseTextForPresentation(respHeaderText, respBodyText, latencyTimeMilisec, downloadTimeMilisec) panel = sublime.active_window().new_file() obj = json.loads(respBodyText) prettyRespBodyText = json.dumps(obj, indent = 2, ensure_ascii = False, sort_keys = False, separators = (',', ': ')) panel.set_read_only(False) panel.set_syntax_file("Packages/JavaScript/JSON.tmLanguage") panel.run_command('append', {'characters': prettyRespBodyText}) panel.set_read_only(True) conn.close() except (socket.error, http.client.HTTPException, socket.timeout) as e: print(e) # if not(isinstance(e, types.NoneType)): # respText = "Error connecting: " + str(e) # else: # respText = "Error connecting" except AttributeError as 
e: print(e) respText = "HTTPS not supported by your Python version" def getParsedResponse(self, resp): fileType = self.FILE_TYPE_HTML resp_status = "%d " % resp.status + resp.reason + "\n" respHeaderText = resp_status for header in resp.getheaders(): respHeaderText += header[0] + ":" + header[1] + "\n" # get resp. file type (html, json and xml supported). fallback to html if header[0] == "content-type": fileType = self.getFileTypeFromContentType(header[1]) respBodyText = "" self.contentLenght = int(resp.getheader("content-length", 0)) # download a 8KB buffer at a time respBody = resp.read(self.MAX_BYTES_BUFFER_SIZE) numDownloaded = len(respBody) self.totalBytesDownloaded = numDownloaded while numDownloaded == self.MAX_BYTES_BUFFER_SIZE: data = resp.read(self.MAX_BYTES_BUFFER_SIZE) respBody += data numDownloaded = len(data) self.totalBytesDownloaded += numDownloaded respBodyText += respBody.decode(self.htmlCharset, "replace") return (respHeaderText, respBodyText, fileType) def getFileTypeFromContentType(self, contentType): fileType = self.FILE_TYPE_HTML contentType = contentType.lower() print ("File type: ", contentType) for cType in self.httpContentTypes: if cType in contentType: fileType = cType return fileType def getResponseTextForPresentation(self, respHeaderText, respBodyText, latencyTimeMilisec, downloadTimeMilisec): return respHeaderText + "\n" + "Latency: " + str(latencyTimeMilisec) + "ms" + "\n" + "Download time:" + str(downloadTimeMilisec) + "ms" + "\n\n\n" + respBodyText def arangoChangeConnection(index): global selectedIndexOptions names = Options.list() selectedIndexOptions = index sublime.status_message(' SQLExec: switched to %s' % names[index]) class arangoListConnection(sublime_plugin.TextCommand): def run(self, edit): sublime.active_window().show_quick_panel(Options.list(), arangoChangeConnection) class ArangoExplainCommand(sublime_plugin.TextCommand): def run(self, edit): Options.list() for region in self.view.sel(): # If no selection, use the entire file as the selection query = '' if region.empty() : query = self.view.substr(sublime.Region(0, self.view.size())) else: query = self.view.substr(sublime.Region(region.a, region.b)) command = Command() command.explain(query) class ArangoExecCommand(sublime_plugin.TextCommand): def run(self, edit): Options.list() for region in self.view.sel(): # If no selection, use the entire file as the selection query = '' if region.empty() : query = self.view.substr(sublime.Region(0, self.view.size())) else: query = self.view.substr(sublime.Region(region.a, region.b)) command = Command() command.execute(query)
Python
0.000793
@@ -2004,35 +2004,8 @@ ST%22%0A - print(request_page) %0A
887b03d7587525509d3652ef42b930025194d2ad
Update 2sum.py
Array/2sum.py
Array/2sum.py
Given an array of integers, find two numbers such that they add up to a specific target number. The function twoSum should return indices of the two numbers such that they add up to the target, where index1 must be less than index2. Please note that your returned answers (both index1 and index2) are not zero-based. You may assume that each input would have exactly one solution. Input: numbers={2, 7, 11, 15}, target=9 Output: index1=1, index2=2 class Solution: # @return a tuple, (index1, index2) def twoSum(self, nums, target): if not nums or len(nums) < 2: return None idict = {} for i, value in enumerate(nums): if target - value in idict: return [idict[target-value], i+1] idict[value] = i+1 # 79ms def twoSum(self, num, target): dic = {} for i in xrange(len(num)): if num[i] in dic: result1 = dic[num[i]] +1 result2 = i +1 else: dic[target-num[i]] = i return (result1,result2) # 68ms def twoSum(self, num, target): tmpnum = num[:] tmpnum.sort() length = len(num) i = 0; j = length-1 while i < j: tmpval = tmpnum[i]+tmpnum[j] if tmpval == target: res1 = num.index(tmpnum[i]) num.reverse() res2 = len(num)-1-num.index(tmpnum[j]) if res1<res2: return (res1+1,res2+1) else: return(res2+1,res1+1) if tmpval > target: j -= 1 if tmpval < target: i += 1
Python
0
@@ -499,16 +499,37 @@ index2)%0A + # 48s%0A # O(n)%0A def
dbdfbc18ebadc0a1d50a6513bb982d2e3881036f
Add MAX_TURNS and some more output to train-ml-bot
train-ml-bot.py
train-ml-bot.py
""" Train a machine learning model for the classifier bot. We create a player, and watch it play games against itself. Every observed state is converted to a feature vector and labeled with the eventual outcome (-1.0: player 2 won, 1.0: player 1 won) This is part of the second worksheet. """ from api import State, util # This package contains various machine learning algorithms import sklearn import sklearn.linear_model from sklearn.externals import joblib from bots.rand import rand # from bots.alphabeta import alphabeta from bots.ml import ml from bots.ml.ml import features import matplotlib.pyplot as plt # How many games to play GAMES = 1000 # Number of planets in the field NUM_PLANETS = 6 # The player we'll observe player = rand.Bot() # player = alphabeta.Bot() data = [] target = [] for g in range(GAMES): state, id = State.generate(NUM_PLANETS) state_vectors = [] while not state.finished(): state_vectors.append(features(state)) move = player.get_move(state) state = state.next(move) winner = state.winner() for state_vector in state_vectors: data.append(state_vector) target.append('won' if winner == 1 else 'lost') if g % (GAMES/10) == 0: print('game {} finished ({}%)'.format(g, (g/float(GAMES)*100) )) # Train a logistic regression model learner = sklearn.linear_model.LogisticRegression() model = learner.fit(data, target) # Check for class imbalance count = {} for str in target: if str not in count: count[str] = 0 count[str] += 1 print('instances per class: {}'.format(count)) # Store the model in the ml directory joblib.dump(model, './bots/ml/model.pkl') print('Done')
Python
0
@@ -376,16 +376,27 @@ orithms%0A +import sys%0A import s @@ -710,16 +710,66 @@ NETS = 6 +%0A# Maximum number of turns to play%0ANUM_TURNS = 100 %0A%0A# The @@ -952,16 +952,26 @@ rs = %5B%5D%0A + i = 0%0A whil @@ -992,18 +992,36 @@ nished() + and i %3C= NUM_TURNS : -%0A %0A @@ -1132,16 +1132,32 @@ (move)%0A%0A + i += 1%0A%0A winn @@ -1307,16 +1307,65 @@ lost')%0A%0A + sys.stdout.write(%22.%22)%0A sys.stdout.flush()%0A if g @@ -1384,16 +1384,34 @@ ) == 0:%0A + print(%22%22)%0A @@ -1471,17 +1471,16 @@ ES)*100) - ))%0A%0A# Tr
249b6a3cd013d23df8748de691a391ef40b5539b
put try except around wx-config --ldflags to support older versions of wxPython
weave/wx_spec.py
weave/wx_spec.py
import common_info from c_spec import common_base_converter import sys,os # these may need user configuration. if sys.platform == "win32": wx_base = r'c:\wxpython-2.3.3.1' else: # probably should do some more discovery here. wx_base = '/usr/lib/wxPython' def get_wxconfig(flag): wxconfig = os.path.join(wx_base,'bin','wx-config') import commands res,settings = commands.getstatusoutput(wxconfig + ' --' + flag) if res: msg = wxconfig + ' failed. Impossible to learn wxPython settings' raise RuntimeError, msg return settings.split() wx_to_c_template = \ """ class %(type_name)s_handler { public: %(c_type)s convert_to_%(type_name)s(PyObject* py_obj, const char* name) { %(c_type)s wx_ptr; // work on this error reporting... if (SWIG_GetPtrObj(py_obj,(void **) &wx_ptr,"_%(type_name)s_p")) handle_conversion_error(py_obj,"%(type_name)s", name); %(inc_ref_count)s return wx_ptr; } %(c_type)s py_to_%(type_name)s(PyObject* py_obj,const char* name) { %(c_type)s wx_ptr; // work on this error reporting... if (SWIG_GetPtrObj(py_obj,(void **) &wx_ptr,"_%(type_name)s_p")) handle_bad_type(py_obj,"%(type_name)s", name); %(inc_ref_count)s return wx_ptr; } }; %(type_name)s_handler x__%(type_name)s_handler = %(type_name)s_handler(); #define convert_to_%(type_name)s(py_obj,name) \\ x__%(type_name)s_handler.convert_to_%(type_name)s(py_obj,name) #define py_to_%(type_name)s(py_obj,name) \\ x__%(type_name)s_handler.py_to_%(type_name)s(py_obj,name) """ class wx_converter(common_base_converter): def __init__(self,class_name="undefined"): self.class_name = class_name common_base_converter.__init__(self) def init_info(self): common_base_converter.init_info(self) # These are generated on the fly instead of defined at # the class level. self.type_name = self.class_name self.c_type = self.class_name + "*" self.return_type = self.class_name + "*" self.to_c_return = None # not used self.check_func = None # not used self.headers.append('"wx/wx.h"') if sys.platform == "win32": # These will be used in many cases self.headers.append('<windows.h>') # These are needed for linking. self.libraries.extend(['kernel32','user32','gdi32','comdlg32', 'winspool', 'winmm', 'shell32', 'oldnames', 'comctl32', 'ctl3d32', 'odbc32', 'ole32', 'oleaut32', 'uuid', 'rpcrt4', 'advapi32', 'wsock32']) # not sure which of these macros are needed. self.define_macros.append(('WIN32', '1')) self.define_macros.append(('__WIN32__', '1')) self.define_macros.append(('_WINDOWS', '1')) self.define_macros.append(('STRICT', '1')) # I think this will only work on NT/2000/XP set # set to 0x0400 for earlier versions. # Hmmm. setting this breaks stuff #self.define_macros.append(('WINVER', '0x0350')) self.library_dirs.append(os.path.join(wx_base,'lib')) self.include_dirs.append(os.path.join(wx_base,'include')) # how do I discover unicode or not unicode?? # non-unicode #self.libraries.append('wxmswh') #self.include_dirs.append(os.path.join(wx_base,'lib','mswdllh')) # unicode self.libraries.append('wxmswuh') self.include_dirs.append(os.path.join(wx_base,'lib','mswdlluh')) self.define_macros.append(('UNICODE', '1')) else: # make sure the gtk files are available # ?? Do I need to link to them? self.headers.append('"gdk/gdk.h"') # !! This shouldn't be hard coded. 
self.include_dirs.append("/usr/include/gtk-1.2") self.include_dirs.append("/usr/include/glib-1.2") self.include_dirs.append("/usr/lib/glib/include") cxxflags = get_wxconfig('cxxflags') libflags = get_wxconfig('libs') + get_wxconfig('gl-libs') ldflags = get_wxconfig('ldflags') self.extra_compile_args.extend(cxxflags) self.extra_link_args.extend(libflags) self.extra_link_args.extend(ldflags) self.support_code.append(common_info.swig_support_code) def type_match(self,value): is_match = 0 try: wx_class = value.this.split('_')[-2] if wx_class[:2] == 'wx': is_match = 1 except AttributeError: pass return is_match def generate_build_info(self): if self.class_name != "undefined": res = common_base_converter.generate_build_info(self) else: # if there isn't a class_name, we don't want the # we don't want the support_code to be included import base_info res = base_info.base_info() return res def py_to_c_code(self): return wx_to_c_template % self.template_vars() #def c_to_py_code(self): # return simple_c_to_py_template % self.template_vars() def type_spec(self,name,value): # factory class_name = value.this.split('_')[-2] new_spec = self.__class__(class_name) new_spec.name = name return new_spec def __cmp__(self,other): #only works for equal res = -1 try: res = cmp(self.name,other.name) or \ cmp(self.__class__, other.__class__) or \ cmp(self.class_name, other.class_name) or \ cmp(self.type_name,other.type_name) except: pass return res """ # this should only be enabled on machines with access to a display device # It'll cause problems otherwise. def test(level=10): from scipy_test.testing import module_test module_test(__name__,__file__,level=level) def test_suite(level=1): from scipy_test.testing import module_test_suite return module_test_suite(__name__,__file__,level=level) """
Python
0
@@ -4482,41 +4482,220 @@ -ldflags = get_wxconfig('ldflags') +%0A #older versions of wx do not support the ldflags.%0A try:%0A ldflags = get_wxconfig('ldflags')%0A except RuntimeError:%0A ldflags = %5B%5D%0A %0A
c5af0d98407052b9f04e37efc741c9b457825eb7
Fix reading JSON file
Python/scoreP2.py
Python/scoreP2.py
# coding=utf-8 import json import os import numpy as np from scoreCommon import matchInputFile, \ computeCommonMetrics, computeAveragePrecisionMetrics _FEATURE_NAMES = ['globules', 'streaks'] def loadFeatures(featuresPath): try: features = json.load(featuresPath) except ValueError: # TODO: is this the right error type? raise Exception('Could not parse file "%s" as JSON.' % os.path.basename(featuresPath)) if not isinstance(features, dict): raise Exception('JSON file %s does not contain an Object ' '(key-value mapping) at the top-level.' % os.path.basename(featuresPath)) for featureName in _FEATURE_NAMES: if featureName not in features: raise Exception('JSON file "%s" does not contain an element for ' 'feature "%s".' % (os.path.basename(featuresPath), featureName)) if not isinstance(features[featureName], list): raise Exception('Feature "%s" in JSON file "%s" is not an Array.' % (featureName, os.path.basename(featuresPath))) try: features[featureName] = [ float(superpixelValue) for superpixelValue in features[featureName] ] except ValueError: raise Exception('Array for feature "%s" in JSON file "%s" contains ' 'non-floating-point value(s).' % (featureName, os.path.basename(featuresPath))) for superpixelValue in features[featureName]: if not (0.0 <= superpixelValue <= 1.0): raise Exception('Array for feature "%s" in JSON file "%s" ' 'contains a value outside the range ' '[0.0, 1.0].' % (featureName, os.path.basename(featuresPath))) return features def scoreP2Features(truthPath, testPath): truthFeatures = loadFeatures(truthPath) testFeatures = loadFeatures(testPath) scores = [] for featureName in _FEATURE_NAMES: if len(testFeatures[featureName]) != len(truthFeatures[featureName]): raise Exception('Array for feature "%s" in JSON file "%s" is length' ' %d (expected length %d).' % (featureName, os.path.basename(testPath), len(testFeatures[featureName]), len(truthFeatures[featureName]))) # Build the Numpy arrays for calculations truthValues = np.array(truthFeatures[featureName]) testValues = np.array(testFeatures[featureName]) # Compute accuracy, sensitivity, and specificity truthBinaryValues = truthValues > 0.5 testBinaryValues = testValues > 0.5 metrics = computeCommonMetrics(truthBinaryValues, testBinaryValues) # Compute average precision metrics.extend(computeAveragePrecisionMetrics(truthValues, testValues)) # truthPath ~= '/.../ISIC_0000003.json' datasetName = os.path.splitext(os.path.basename(truthPath))[0] scores.append({ 'dataset': '%s_%s' % (datasetName, featureName), 'metrics': metrics }) return scores def scoreP2(truthDir, testDir): scores = [] for truthFile in sorted(os.listdir(truthDir)): testPath = matchInputFile(truthFile, testDir) truthPath = os.path.join(truthDir, truthFile) scores.extend(scoreP2Features(truthPath, testPath)) return scores
Python
0.999549
@@ -249,28 +249,206 @@ -features = json.load +with open(featuresPath) as f:%0A features = json.load(f)%0A except IOError:%0A raise Exception('Internal error: error reading JSON file: %25s'%0A %25 os.path.basename (fea @@ -453,24 +453,25 @@ eaturesPath) +) %0A except
8285d61247c8e8d33cc12e74a39aca6fea40e5bd
Remove unneeded "d".
tests/tabular_output/test_preprocessors.py
tests/tabular_output/test_preprocessors.py
# -*- coding: utf-8 -*- """Test CLI Helpers' tabular output preprocessors.""" from __future__ import unicode_literals from decimal import Decimal from cli_helpers.tabular_output.preprocessors import (align_decimals, bytes_to_string, convert_to_string, quote_whitespaces, override_missing_value, format_numbers) def test_convert_to_string(): """Test the convert_to_string() function.""" data = [[1, 'John'], [2, 'Jill']] headers = [0, 'name'] expected = ([['1', 'John'], ['2', 'Jill']], ['0', 'name']) assert expected == convert_to_string(data, headers) def test_override_missing_values(): """Test the override_missing_values() function.""" data = [[1, None], [2, 'Jill']] headers = [0, 'name'] expected = ([[1, '<EMPTY>'], [2, 'Jill']], [0, 'name']) assert expected == override_missing_value(data, headers, missing_value='<EMPTY>') def test_bytes_to_string(): """Test the bytes_to_string() function.""" data = [[1, 'John'], [2, b'Jill']] headers = [0, 'name'] expected = ([[1, 'John'], [2, 'Jill']], [0, 'name']) assert expected == bytes_to_string(data, headers) def test_align_decimals(): """Test the align_decimals() function.""" data = [[Decimal('200'), Decimal('1')], [ Decimal('1.00002'), Decimal('1.0')]] headers = ['num1', 'num2'] expected = ([['200', '1'], [' 1.00002', '1.0']], ['num1', 'num2']) assert expected == align_decimals(data, headers) def test_align_decimals_empty_result(): """Test align_decimals() with no results.""" data = [] headers = ['num1', 'num2'] expected = ([], ['num1', 'num2']) assert expected == align_decimals(data, headers) def test_quote_whitespaces(): """Test the quote_whitespaces() function.""" data = [[" before", "after "], [" both ", "none"]] headers = ['h1', 'h2'] expected = ([["' before'", "'after '"], ["' both '", "'none'"]], ['h1', 'h2']) assert expected == quote_whitespaces(data, headers) def test_quote_whitespaces_empty_result(): """Test the quote_whitespaces() function with no results.""" data = [] headers = ['h1', 'h2'] expected = ([], ['h1', 'h2']) assert expected == quote_whitespaces(data, headers) def test_quote_whitespaces_non_spaces(): """Test the quote_whitespaces() function with non-spaces.""" data = [["\tbefore", "after \r"], ["\n both ", "none"]] headers = ['h1', 'h2'] expected = ([["'\tbefore'", "'after \r'"], ["'\n both '", "'none'"]], ['h1', 'h2']) assert expected == quote_whitespaces(data, headers) def test_format_integer(): """Test formatting for an INTEGER datatype.""" data = [[1], [1000], [1000000]] headers = ['h1'] result = format_numbers(data, headers, column_types=(int,), decimal_format=',d', float_format=',') expected = [['1'], ['1,000'], ['1,000,000']] assert expected, headers == result def test_format_decimal(): """Test formatting for a DECIMAL(12, 4) datatype.""" data = [[Decimal('1.0000')], [Decimal('1000.0000')], [Decimal('1000000.0000')]] headers = ['h1'] result = format_numbers(data, headers, column_types=(float,), decimal_format=',d', float_format=',') expected = [['1.0000'], ['1,000.0000'], ['1,000,000.0000']] assert expected, headers == result def test_format_float(): """Test formatting for a REAL datatype.""" data = [[1.0], [1000.0], [1000000.0]] headers = ['h1'] result = format_numbers(data, headers, column_types=(float,), decimal_format=',d', float_format=',') expected = [['1.0'], ['1,000.0'], ['1,000,000.0']] assert expected, headers == result def test_format_integer_only(): """Test that providing one format string works.""" data = [[1, 1.0], [1000, 1000.0], [1000000, 1000000.0]] headers = ['h1', 'h2'] result = format_numbers(data, 
headers, column_types=(int, float), decimal_format=',d') expected = [['1', 1.0], ['1,000', 1000.0], ['1,000,000', 1000000.0]] assert expected, headers == result def test_format_numbers_no_format_strings(): """Test that numbers aren't formatted without format strings.""" data = ((1), (1000), (1000000)) headers = ('h1',) result = format_numbers(data, headers, column_types=(int,)) assert data, headers == result def test_format_numbers_no_column_types(): """Test that numbers aren't formatted without column types.""" data = ((1), (1000), (1000000)) headers = ('h1',) result = format_numbers(data, headers, decimal_format=',d', float_format=',') assert data, headers == result
Python
0.000984
@@ -3215,36 +3215,35 @@ ecimal_format=', -d ',%0A + @@ -3711,33 +3711,32 @@ ecimal_format=', -d ',%0A @@ -4168,33 +4168,32 @@ ecimal_format=', -d ',%0A @@ -4610,17 +4610,16 @@ ormat=', -d ')%0A%0A @@ -5161,32 +5161,32 @@ aders = ('h1',)%0A + result = for @@ -5229,17 +5229,16 @@ ormat=', -d ',%0A
6454bca66b73efa6e124fce80634fc98bd0b9c25
add new dependencies for python 3.7.6
Back/setup.py
Back/setup.py
import os from setuptools import setup, find_packages here = os.path.abspath(os.path.dirname(__file__)) with open(os.path.join(here, 'README.txt')) as f: README = f.read() with open(os.path.join(here, 'CHANGES.txt')) as f: CHANGES = f.read() requires = [ 'pyodbc==4.0.28', 'pyramid==1.10.4', 'sqlalchemy==1.3.12', 'transaction==3.0.0', 'waitress==1.4.2', 'webargs==6.0.0b2' ] setup( name='ns_portal', version='0.3', description='ns_portal', long_description=README + '\n\n' + CHANGES, classifiers=[ "Programming Language :: Python", "Framework :: Pyramid", "Topic :: Internet :: WWW/HTTP", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", ], author='', author_email='', url='', keywords='web wsgi bfg pylons pyramid', packages=find_packages(), include_package_data=True, zip_safe=False, test_suite='ns_portal', install_requires=requires, entry_points="""\ [paste.app_factory] main = ns_portal:main [console_scripts] initialize_ns_portal_db = ns_portal.scripts.initializedb:main """ )
Python
0.000001
@@ -259,16 +259,42 @@ res = %5B%0A + 'marshmallow==3.3.0',%0A 'pyo @@ -307,9 +307,9 @@ .0.2 -8 +7 ',%0A @@ -425,17 +425,17 @@ ==6.0.0b -2 +3 '%0A %5D%0A @@ -479,17 +479,17 @@ sion='0. -3 +4 ',%0A d
d9c9f9c363f5520f37800930efd9eaa1e43daed7
bump version
ttt/__init__.py
ttt/__init__.py
# -*- coding: utf-8 -*- __version__ = '0.3.1'
Python
0
@@ -41,7 +41,7 @@ 0.3. -1 +2 '%0A
f20055c525577069333271283d921705bf21bfb2
disable bad graphic button for now
tweets/admin.py
tweets/admin.py
from django.contrib import admin from django.utils.safestring import mark_safe from models import Tweet, SearchTerm, Message, MarketAccount from filters import TwitterImageFilter, TweetStatusFilter, TongueGraphicFilter # Register your models here. def mark_deleted(modeladmin, request, queryset): queryset.update(deleted=True) mark_deleted.short_description = 'Hide selected tweets' def mark_approved(modeladmin, request, queryset): queryset.update(approved=True) mark_approved.short_description = 'Mark selected tweets as approved' class BaseAdmin(admin.ModelAdmin): class Media: js = ('js/tweet_admin.js', ) css = { 'all': ('css/adi051.css', ) } class MessageAdmin(BaseAdmin): list_display = ('account', 'type', 'copy') list_filter = ('account', 'type') class TweetAdmin(BaseAdmin): search_fields = ('handle', 'content',) list_display = ('created_at', 'high_priority', 'get_handle', 'account', 'get_image', 'get_autophotoshop', 'get_photoshop', 'content', 'messages', 'tweeted_by', 'get_artworker', 'notes') list_filter = ('account', 'high_priority', TweetStatusFilter, TwitterImageFilter, TongueGraphicFilter, 'artworker', 'tweeted_by', 'created_at', 'tweeted_at', 'entry_allowed') list_editable = ('notes', ) list_per_page = 25 actions = [mark_deleted, ] fieldsets = ( ('Attach your photoshop', { 'fields': ('photoshop', ), }), ('Make high priority', { 'fields': ('high_priority', 'notes'), }), ('View/change autophotoshop', { 'classes': ('collapse', ), 'fields': ('auto_base', ('auto_photoshop_1', 'auto_compose_1'), ('auto_photoshop_2', 'auto_compose_2'), ('auto_photoshop_3', 'auto_compose_3')), }), ('Tweet data', { 'classes': ('collapse', ), 'fields': ('created_at', 'handle', 'account', 'content', 'image_url', 'uid', 'entry_allowed', 'disallowed_reason'), }), ('Sent data', { 'classes': ('collapse', ), 'fields': ('artworker', 'tweeted_by', 'tweeted_at', 'tweet_id', 'sent_tweet', ) }), ) def get_image(self, obj): if obj.image_url: if 'twitpic' in obj.image_url: url = 'http://twitpic.com/show/thumb/{}'.format(obj.image_url.split('/')[-1]) else: url = obj.image_url return mark_safe('<a href="{0}" target="_blank"><img src="{1}" width=100 /></a>'.format(obj.image_url, url)) else: return "N/A" get_image.short_description = 'Original Image' def get_handle(self, obj): return mark_safe(""" <p><a href="http://twitter.com/{0}" target="_blank">{0}</a></p> <p><em>({1} Followers) """.format(obj.handle.encode('utf-8'), obj.followers)) get_handle.short_description = 'User\'s Handle' def messages(self, obj): return mark_safe(""" <ul class="message-btns"> <li><a class="btn btn-danger send_tweet" data-msgtype="tryagain">Image doesn't work</a></li> <li><a class="btn btn-success send_tweet" data-msgtype="imagelink">Tweet tongue graphic</a></li> </ul> """) messages.short_description = 'Tweet back to user' def get_photoshop(self, obj): if obj.photoshop: if obj.tweet_id: # Open up the actual tweet if it's been sent return mark_safe('<a href="http://twitter.com/{0}/status/{2}" target="_blank"><img src={1} width=100 /></a>'.\ format(obj.account.handle, obj.photoshop.url, obj.tweet_id)) else: # Otherwise direct to the local image return mark_safe('<a href="{0}" target="_blank"><img src={0} width=100 /></a>'.format(obj.photoshop.url)) else: return mark_safe('<a class="btn btn-warning" href="/tweets/tweet/{}">Upload</a>'.format(obj.id)) get_photoshop.short_description = 'Tongue Graphic' def get_autophotoshop(self, obj): auto, base, composed = ["N/A", ] * 3, "N/A", ["N/A", ] * 3 if obj.auto_base: base = '<a class="autoshop" href="{0}" 
target="_blank"><img src={0} /></a><br>'.format(obj.auto_base.url) num_of_files = 3 files = range(1, num_of_files + 1) for cnt in files: if getattr(obj, 'auto_photoshop_%d' % cnt): auto[cnt - 1] = '<a class="autoshop" href="{0}" target="_blank"><img src={0} /></a>'.format(getattr(obj, 'auto_photoshop_%d' % cnt).url) if getattr(obj, 'auto_compose_%d' % cnt): composed[cnt - 1] = '<a class="autoshop" href="{0}" target="_blank"><img src={0} /></a>'.format(getattr(obj, 'auto_compose_%d' % cnt).url) args = [base, ] + auto + composed return mark_safe(""" <table class="autogen-results"> <tr><td colspan="3" align="center" class="base-img">%s</td></tr> <tr><td>%s</td><td>%s</td><td>%s</td></tr> <tr><td>%s</td><td>%s</td><td>%s</td></tr> </table> """ % (args[0], args[1], args[2], args[3], args[4], args[5], args[6])) get_autophotoshop.short_description = 'Automatic Graphic' def get_artworker(self, obj): if obj.artworker: return obj.artworker.username else: return mark_safe(""" <a class="btn btn-info assign-artworker">Start Working!</a> """) get_artworker.short_description = 'Artworker Status' def save_model(self, request, obj, form, change): # TODO: fix bug with this - if a CM edits and saves a tweet directly # this will set the artworker to them if 'photoshop' in form.changed_data: obj.artworker = request.user obj.save() def get_actions(self, request): actions = super(TweetAdmin, self).get_actions(request) if 'delete_selected' in actions: del actions['delete_selected'] return actions def get_queryset(self, request): if request.user.is_superuser: return self.model.everything.get_query_set() return super(TweetAdmin, self).get_queryset(request) admin.site.register(Tweet, TweetAdmin) admin.site.register(SearchTerm, BaseAdmin) admin.site.register(Message, MessageAdmin) admin.site.register(MarketAccount, BaseAdmin)
Python
0
@@ -3022,32 +3022,36 @@ +%3C!-- %3Cli%3E%3Ca class=%22bt @@ -3118,32 +3118,35 @@ 't work%3C/a%3E%3C/li%3E +--%3E %0A
37d97f21ec4e73855a49659ef3a867b1706ac914
Update error message (#11197)
datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py
datadog_checks_base/datadog_checks/base/checks/openmetrics/base_check.py
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) from copy import deepcopy import requests from six import PY2 from ...errors import CheckException from ...utils.tracing import traced_class from .. import AgentCheck from .mixins import OpenMetricsScraperMixin STANDARD_FIELDS = [ 'prometheus_url', 'namespace', 'metrics', 'prometheus_metrics_prefix', 'health_service_check', 'include_labels', 'label_to_hostname', 'label_joins', 'labels_mapper', 'type_overrides', 'send_histograms_buckets', 'send_distribution_buckets', 'send_monotonic_counter', 'send_monotonic_with_gauge', 'send_distribution_counts_as_monotonic', 'send_distribution_sums_as_monotonic', 'exclude_labels', 'bearer_token_auth', 'bearer_token_path', 'ignore_metrics', ] class OpenMetricsBaseCheck(OpenMetricsScraperMixin, AgentCheck): """ OpenMetricsBaseCheck is a class that helps scrape endpoints that emit Prometheus metrics only with YAML configurations. Minimal example configuration: instances: - prometheus_url: http://example.com/endpoint namespace: "foobar" metrics: - bar - foo Agent 6 signature: OpenMetricsBaseCheck(name, init_config, instances, default_instances=None, default_namespace=None) """ DEFAULT_METRIC_LIMIT = 2000 HTTP_CONFIG_REMAPPER = { 'ssl_verify': {'name': 'tls_verify'}, 'ssl_cert': {'name': 'tls_cert'}, 'ssl_private_key': {'name': 'tls_private_key'}, 'ssl_ca_cert': {'name': 'tls_ca_cert'}, 'prometheus_timeout': {'name': 'timeout'}, 'request_size': {'name': 'request_size', 'default': 10}, } # Allow tracing for openmetrics integrations def __init_subclass__(cls, **kwargs): super().__init_subclass__(**kwargs) return traced_class(cls) def __init__(self, *args, **kwargs): """ The base class for any Prometheus-based integration. """ args = list(args) default_instances = kwargs.pop('default_instances', None) or {} default_namespace = kwargs.pop('default_namespace', None) legacy_kwargs_in_args = args[4:] del args[4:] if len(legacy_kwargs_in_args) > 0: default_instances = legacy_kwargs_in_args[0] or {} if len(legacy_kwargs_in_args) > 1: default_namespace = legacy_kwargs_in_args[1] super(OpenMetricsBaseCheck, self).__init__(*args, **kwargs) self.config_map = {} self._http_handlers = {} self.default_instances = default_instances self.default_namespace = default_namespace # pre-generate the scraper configurations if 'instances' in kwargs: instances = kwargs['instances'] elif len(args) == 4: # instances from agent 5 signature instances = args[3] elif isinstance(args[2], (tuple, list)): # instances from agent 6 signature instances = args[2] else: instances = None if instances is not None: for instance in instances: possible_urls = instance.get('possible_prometheus_urls') if possible_urls is not None: for url in possible_urls: try: new_instance = deepcopy(instance) new_instance.update({'prometheus_url': url}) scraper_config = self.get_scraper_config(new_instance) response = self.send_request(url, scraper_config) response.raise_for_status() instance['prometheus_url'] = url self.get_scraper_config(instance) break except (IOError, requests.HTTPError, requests.exceptions.SSLError) as e: self.log.info("Couldn't connect to %s: %s, trying next possible URL.", url, str(e)) else: raise CheckException( "The agent could connect to none of the following URL: %s." 
% possible_urls ) else: self.get_scraper_config(instance) def check(self, instance): # Get the configuration for this specific instance scraper_config = self.get_scraper_config(instance) # We should be specifying metrics for checks that are vanilla OpenMetricsBaseCheck-based if not scraper_config['metrics_mapper']: raise CheckException( "You have to collect at least one metric from the endpoint: {}".format(scraper_config['prometheus_url']) ) self.process(scraper_config) def get_scraper_config(self, instance): """ Validates the instance configuration and creates a scraper configuration for a new instance. If the endpoint already has a corresponding configuration, return the cached configuration. """ endpoint = instance.get('prometheus_url') if endpoint is None: raise CheckException("Unable to find prometheus URL in config file.") # If we've already created the corresponding scraper configuration, return it if endpoint in self.config_map: return self.config_map[endpoint] # Otherwise, we create the scraper configuration config = self.create_scraper_configuration(instance) # Add this configuration to the config_map self.config_map[endpoint] = config return config def _finalize_tags_to_submit(self, _tags, metric_name, val, metric, custom_tags=None, hostname=None): """ Format the finalized tags This is generally a noop, but it can be used to change the tags before sending metrics """ return _tags def _filter_metric(self, metric, scraper_config): """ Used to filter metrics at the beginning of the processing, by default no metric is filtered """ return False # For documentation generation # TODO: use an enum and remove STANDARD_FIELDS when mkdocstrings supports it class StandardFields(object): pass if not PY2: StandardFields.__doc__ = '\n'.join('- `{}`'.format(field) for field in STANDARD_FIELDS)
Python
0
@@ -4272,16 +4272,20 @@ t could +not connect @@ -4291,12 +4291,11 @@ to -none +any of @@ -4311,16 +4311,17 @@ wing URL +s : %25s.%22 %25
c6c74870e6557dbd0523d1b01f377f14b05b632a
Add db check to makeTest to ensure we cover cases where an explicit path to a test function is passed
django_nose/plugin.py
django_nose/plugin.py
import os.path import sys from django.conf import settings from django.db.models.loading import get_apps, load_app from django.test.testcases import TransactionTestCase class ResultPlugin(object): """ Captures the TestResult object for later inspection. nose doesn't return the full test result object from any of its runner methods. Pass an instance of this plugin to the TestProgram and use ``result`` after running the tests to get the TestResult object. """ name = "result" enabled = True def finalize(self, result): self.result = result class DjangoSetUpPlugin(object): """ Configures Django to setup and tear down the environment. This allows coverage to report on all code imported and used during the initialisation of the test runner. Only sets up databases if a single class inherits from ``django.test.testcases.TransactionTestCase``. Also ensures you don't run the same test case multiple times. """ name = "django setup" enabled = True def __init__(self, runner): super(DjangoSetUpPlugin, self).__init__() self.runner = runner self.sys_stdout = sys.stdout self.sys_stderr = sys.stderr self.needs_db = False self.started = False self._registry = set() def begin(self): self.add_apps = set() def wantClass(self, cls): if issubclass(cls, TransactionTestCase): self.needs_db = True if cls in self._registry: return False self._registry.add(cls) def wantMethod(self, method): if issubclass(method.im_class, TransactionTestCase): self.needs_db = True if method in self._registry: return False self._registry.add(method) def wantFunction(self, function): if function in self._registry: return False self._registry.add(function) def beforeImport(self, filename, module): # handle case of tests.models if not os.path.isdir(filename): filepath = os.path.dirname(filename) module = module.rsplit('.', 1)[0] else: filepath = filename models_path = os.path.join(filepath, 'models.py') if os.path.exists(models_path): self.add_apps.add(module) # handle case of fooapp.tests, where fooapp.models exists models_path = os.path.join(filepath, os.pardir, 'models.py') if os.path.exists(models_path): self.add_apps.add(module.rsplit('.', 1)[0]) def prepareTestRunner(self, test): cur_stdout = sys.stdout cur_stderr = sys.stderr sys.stdout = self.sys_stdout sys.stderr = self.sys_stderr if self.add_apps: for app in self.add_apps: if app in settings.INSTALLED_APPS: continue mod = load_app(app) if mod: settings.INSTALLED_APPS.append(app) get_apps() self.runner.setup_test_environment() if self.needs_db: self.old_names = self.runner.setup_databases() sys.stdout = cur_stdout sys.stderr = cur_stderr self.started = True def finalize(self, result): if self.started: if self.needs_db: self.runner.teardown_databases(self.old_names) self.runner.teardown_test_environment()
Python
0
@@ -1936,16 +1936,241 @@ ction)%0A%0A + def makeTest(self, test, parent):%0A if self.needs_db:%0A return%0A if not test.im_class:%0A return%0A if issubclass(test.im_class, TransactionTestCase):%0A self.needs_db = True%0A%0A def
aa3a6dd01d7681f92d1be42fb2831126ced7a76e
Update __init__.py
django_su/__init__.py
django_su/__init__.py
import os # The fake password we will use to authenticate su'ed users SECRET_PASSWORD = os.urandom(64) __version__ = '0.4.8'
Python
0.000072
@@ -101,27 +101,75 @@ 64)%0A -%0A__version__ = '0.4.8' +VERSION = (0, 4, 8)%0A%0A__version__ = '.'.join(%5Bstr(n) for n in VERSION%5D) %0A
3566e996b350b1b5e74caa886b69c17b13ba4913
Add HTTPs support to assertRedirectsTo
django_test_mixins.py
django_test_mixins.py
from django.test import TestCase from django.core.cache import cache import urlparse class HttpCodeTestCase(TestCase): # TODO: this should be a private method. def assertHttpCode(self, response, code, code_description): self.assertEqual( response.status_code, code, "Expected an HTTP %s (%s) response, but got HTTP %s" % (code, code_description, response.status_code)) def assertHttpOK(self, response): self.assertHttpCode(response, 200, "OK") def assertHttpCreated(self, response): self.assertHttpCode(response, 201, "Created") def assertHttpRedirect(self, response, location=None): """Assert that we had any redirect status code. """ self.assertTrue( 300 <= response.status_code < 400, "Expected an HTTP 3XX (redirect) response, but got HTTP %s" % response.status_code ) if location: if location.startswith("http://testserver/"): absolute_location = location else: absolute_location = urlparse.urljoin("http://testserver/", location) self.assertEqual(response['Location'], absolute_location) def assertHttpBadRequest(self, response): self.assertHttpCode(response, 400, "Bad Request") def assertHttpUnauthorized(self, response): self.assertHttpCode(response, 401, "Unauthorized") def assertHttpForbidden(self, response): self.assertHttpCode(response, 403, "Forbidden") def assertHttpNotFound(self, response): self.assertHttpCode(response, 404, "Not Found") def assertHttpMethodNotAllowed(self, response): self.assertHttpCode(response, 405, "Method Not Allowed") class EmptyCacheTestCase(TestCase): """Ensure that every test starts with an empty cache.""" def setUp(self): super(EmptyCacheTestCase, self).setUp() cache.clear() class FormValidationTestCase(TestCase): def assertFormInvalid(self, response, form_name="form"): """Assert that the response contains a form in the context, and that the form failed validation. The form is assumed to be in context[form_name]. If the form has validated when it shouldn't, views often redirect somewhere, so we also check for HTTP 200. """ form = None try: if response.context: form = response.context[form_name] except KeyError: pass if not form: self.fail("Could not find a form in the response.") self.assertFalse(form.is_valid(), "Expected form to be invalid, but it was valid.") status_code = response.status_code self.assertEqual( status_code, 200, "Expected HTTP 200, but got HTTP %d. " "Looks like the form validated when it shouldn't." % status_code) class RedirectTestCase(TestCase): def assertRedirectsTo(self, response, expected_url): """Django's assertRedirects doesn't support external URLs, so we roll our own here. Note that the test client can't fetch external URLs, so we mustn't use fetch=True. """ if response.status_code != 302: self.fail("Did not redirect (got HTTP %s instead)." % response.status_code) if hasattr(response, "redirect_chain"): self.fail("You can't use assertRedirects with follow=True.") final_url = response._headers['location'][1] if not expected_url.startswith('http://'): # we were given a relative URL, so convert it expected_url = "http://testserver%s" % expected_url self.assertEqual( final_url, expected_url, "Expected to be redirected to %s, but got %s instead." % (expected_url, final_url) )
Python
0
@@ -2598,32 +2598,24 @@ response.%22)%0A - %0A sel @@ -3471,24 +3471,16 @@ True.%22)%0A - %0A @@ -3575,16 +3575,60 @@ ttp://') + and not expected_url.startswith('https://') :%0A
8bee6d753e623cdd09353be262b2b6b395d7c7c8
Check stream when close socket
stormed/connection.py
stormed/connection.py
import time import socket from tornado.iostream import IOStream from tornado.ioloop import IOLoop from stormed.util import logger from stormed.frame import FrameReader, FrameHandler, status from stormed.channel import Channel from stormed.method.connection import Close TORNADO_1_2 = hasattr(IOStream, 'connect') class Connection(FrameHandler): """A "physical" TCP connection to the AMQP server heartbeat: int, optional the requested time interval in seconds for heartbeat frames. Connection.on_error callback, when set, is called in case of "hard" AMQP Error. It receives a ConnectionErrorinstance as argument: def handle_error(conn_error): print conn_error.method print conn_error.reply_code conn.on_error = handle_error Connection.on_disconnect callback, when set, is called in case of heartbeat timeout or TCP low level disconnection. It receives no args. """ def __init__(self, host, username='guest', password='guest', vhost='/', port=5672, heartbeat=0, io_loop=None): self.host = host self.port = port self.username = username self.password = password self.vhost = vhost self.heartbeat = heartbeat self.last_received_frame = None self.frame_max = 0 self.io_loop = io_loop or IOLoop.instance() self.stream = None self.status = status.CLOSED self.channels = [self] self.channel_id = 0 self.on_connect = None self.on_disconnect = None self.on_error = None self._close_callback = None self._frame_count = 0 super(Connection, self).__init__(connection=self) def connect(self, callback): """open the connection to the server""" if self.status is not status.CLOSED: raise Exception('Connection status is %s' % self.status) self.status = status.OPENING sock = socket.socket() sock.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1) self.on_connect = callback if TORNADO_1_2: self.stream = IOStream(sock, io_loop=self.io_loop) self.stream.set_close_callback(self.on_closed_stream) self.stream.connect((self.host, self.port), self._handshake) else: sock.connect((self.host, self.port)) self.stream = IOStream(sock, io_loop=self.io_loop) self.stream.set_close_callback(self.on_closed_stream) self._handshake() def close(self, callback=None): """cleanly closes the connection to the server. 
all pending tasks are flushed before connection shutdown""" if self.status != status.CLOSING: self._close_callback = callback self.status = status.CLOSING channels = [ch for ch in self.channels if ch is not self] opened_chs = [ch for ch in channels if ch.status in (status.OPENED, status.OPENING)] closing_chs = [ch for ch in channels if ch.status == status.CLOSING] if opened_chs: for ch in opened_chs: ch.close(self.close) elif closing_chs: pass # let's wait else: m = Close(reply_code=0, reply_text='', class_id=0, method_id=0) self.send_method(m, self._close_callback) def channel(self, callback=None): """get a Channel instance""" if self.status == status.OPENED: ch = Channel(channel_id=len(self.channels), conn=self) self.channels.append(ch) ch.open(callback) return ch else: raise ValueError('connection is not opened') def _handshake(self): self.stream.write('AMQP\x00\x00\x09\x01') FrameReader(self.stream, self._frame_loop) def _frame_loop(self, frame): if self.heartbeat: self.last_received_frame = time.time() self.channels[frame.channel].process_frame(frame) self._frame_count += 1 if self.stream: # Every 5 frames ioloop gets the control back in order # to avoid hitting the recursion limit # reading one frame cost 13 levels of stack recursion # TODO check if always using _callbacks is faster that frame # counting if self._frame_count == 5: self._frame_count = 0 cb = lambda: FrameReader(self.stream, self._frame_loop) self._add_ioloop_callback(cb) else: FrameReader(self.stream, self._frame_loop) if TORNADO_1_2: def _add_ioloop_callback(self, callback): self.io_loop._callbacks.append(callback) else: def _add_ioloop_callback(self, callback): self.io_loop._callbacks.add(callback) def close_stream(self): self.status = status.CLOSED self.stream.close() self.stream = None def on_closed_stream(self): if self.status != status.CLOSED: if self.on_disconnect: try: self.on_disconnect() except Exception: logger.error('ERROR in on_disconnect() callback', exc_info=True) def reset(self): for c in self.channels: if c is not self: c.reset() super(Connection, self).reset() self.close_stream()
Python
0
@@ -4959,32 +4959,85 @@ = status.CLOSED%0A +%0A if self.stream is None:%0A return%0A%0A self.str
f860d338ae22c73ad7e313bf9cd268014be138db
Add Dense Layer to SpecGAN Discriminator
structures/SpecGAN.py
structures/SpecGAN.py
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import tensorflow as tf from tensorflow.keras.layers import Dense, ReLU, LeakyReLU, Conv2D, Conv2DTranspose, Reshape, AveragePooling1D, Flatten from tensorflow.keras import Model, Sequential class Generator(Model): def __init__(self, channels=1, d=4): super(Generator, self).__init__() layers = [] layers.append(Dense(256*d)) layers.append(Reshape((4, 4, 16*d))) layers.append(ReLU()) layers.append(Conv2DTranspose(filters=8*d, kernel_size=(6,6), strides=(2,2), padding='same')) layers.append(ReLU()) layers.append(Conv2DTranspose(filters=4*d, kernel_size=(6,6), strides=(2,2), padding='same')) layers.append(ReLU()) layers.append(Conv2DTranspose(filters=2*d, kernel_size=(6,6), strides=(2,2), padding='same')) layers.append(ReLU()) layers.append(Conv2DTranspose(filters=1*d, kernel_size=(6,6), strides=(2,2), padding='same')) layers.append(ReLU()) layers.append(Conv2DTranspose(filters=channels, kernel_size=(6,6), strides=(2,2), padding='same')) layers.append(ReLU()) layers.append(Conv2DTranspose(filters=channels, kernel_size=(6,6), strides=(1,2), padding='same')) self.l = Sequential(layers) def call(self, z): return self.l(z) class Discriminator(Model): def __init__(self, channels=1, d=4): super(Discriminator, self).__init__() layers = [] layers.append(Conv2D(filters=d, kernel_size=(6,6), strides=(2,2))) layers.append(LeakyReLU(alpha=0.2)) layers.append(Conv2D(filters=2*d, kernel_size=(6,6), strides=(2,2))) layers.append(LeakyReLU(alpha=0.2)) layers.append(Conv2D(filters=4*d, kernel_size=(6,6), strides=(2,2))) layers.append(LeakyReLU(alpha=0.2)) layers.append(Conv2D(filters=8*d, kernel_size=(6,6), strides=(2,2))) layers.append(LeakyReLU(alpha=0.2)) layers.append(Conv2D(filters=16*d, kernel_size=(6,6), strides=(2,2))) layers.append(LeakyReLU(alpha=0.2)) self.l = Sequential(layers) def call(self, x): return self.l(x)
Python
0.000001
@@ -1937,36 +1937,24 @@ init__(self, - channels=1, d=4):%0A @@ -2087,32 +2087,48 @@ ), strides=(2,2) +, padding='same' ))%0A layer @@ -2224,32 +2224,48 @@ ), strides=(2,2) +, padding='same' ))%0A layer @@ -2361,32 +2361,48 @@ ), strides=(2,2) +, padding='same' ))%0A layer @@ -2498,32 +2498,48 @@ ), strides=(2,2) +, padding='same' ))%0A layer @@ -2640,20 +2640,36 @@ trides=( -2 +1 ,2) +, padding='same' ))%0A @@ -2699,32 +2699,97 @@ eLU(alpha=0.2))%0A + layers.append(Flatten())%0A layers.append(Dense(1))%0A %0A
f623312b5df6e8f201f641f87193075e8d3f70ea
Add version attribute
nuxeo-drive-client/nxdrive/__init__.py
nuxeo-drive-client/nxdrive/__init__.py
Python
0
@@ -0,0 +1,23 @@ +_version_ = '1.0.0-dev'
a5d249fc056a58dbcbf995b8686e06f44f83395d
Tidy up
src/common.py
src/common.py
#!/usr/bin/env python # encoding: utf-8 # # Copyright (c) 2017 Dean Jackson <[email protected]> # # MIT Licence. See http://opensource.org/licenses/MIT # # Created on 2017-11-14 # """Common workflow variables and functions.""" from __future__ import print_function, absolute_import from collections import OrderedDict import logging import os from workflow import Variables log = logging.getLogger('workflow') # Default workflow settings DEFAULT_SETTINGS = { 'locales': [ 'en', 'de_DE', 'es_ES', 'fr_FR', ], } DOCS_URL = 'https://github.com/deanishe/alfred-fakeum/blob/master/README.md' HELP_URL = u'https://www.alfredforum.com/topic/5319-fakeum-—-generate-fake-test-datasets-in-alfred/' ISSUE_URL = 'https://github.com/deanishe/alfred-fakeum/issues' UPDATE_SETTINGS = {'github_slug': 'deanishe/alfred-fakeum'} # Workflow icons ICON_DOCS = 'icons/docs.png' ICON_HELP = 'icons/help.png' ICON_ISSUE = 'icons/issue.png' ICON_ON = 'icons/on.png' ICON_OFF = 'icons/off.png' ICON_LOCALES = 'icons/locales.png' ICON_UPDATE_CHECK = 'icons/update-check.png' ICON_UPDATE_AVAILABLE = 'icons/update-available.png' # All locales supported by faker ALL_LOCALES = OrderedDict(( ('en', 'English'), ('de_DE', 'German'), ('es', 'Spanish'), ('fr_FR', 'French'), ('ar_AA', 'Arabic'), ('ar_EG', 'Arabic (Egypt)'), ('ar_JO', 'Arabic (Jordan)'), ('ar_PS', 'Arabic (Palestine)'), ('ar_SA', 'Arabic (Saudi Arabia)'), ('bs_BA', 'Bosnian'), ('bg_BG', 'Bulgarian'), ('zh_CN', 'Chinese (China)'), ('zh_TW', 'Chinese (Taiwan)'), ('hr_HR', 'Croatian'), ('cs_CZ', 'Czech'), ('dk_DK', 'Danish'), ('nl_NL', 'Dutch'), ('nl_BE', 'Dutch (Belgium)'), ('en_AU', 'English (Australia)'), ('en_CA', 'English (Canada)'), ('en_GB', 'English (Great Britain)'), ('en_TH', 'English (Thailand)'), ('en_US', 'English (United States)'), ('et_EE', 'Estonian'), ('fi_FI', 'Finnish'), ('fr_CH', 'French (Switzerland)'), ('ka_GE', 'Georgian'), ('de_AT', 'German (Austria)'), ('tw_GH', 'Ghanaian'), ('el_GR', 'Greek'), ('he_IL', 'Hebrew'), ('hi_IN', 'Hindi'), ('hu_HU', 'Hungarian'), ('id_ID', 'Indonesian'), ('it_IT', 'Italian'), ('ja_JP', 'Japanese'), ('ko_KR', 'Korean'), ('la', 'Latin'), ('lv_LV', 'Latvian'), ('lt_LT', 'Lithuanian'), ('ne_NP', 'Nepali'), ('no_NO', 'Norwegian'), ('fa_IR', 'Persian'), ('pl_PL', 'Polish'), ('pt_BR', 'Portuguese (Brazil)'), ('pt_PT', 'Portuguese (Portugal)'), ('ru_RU', 'Russian'), ('sk_SK', 'Slovakian'), ('sl_SI', 'Slovenian'), ('es_MX', 'Spanish (Mexico)'), ('es_ES', 'Spanish (Spain)'), ('sv_SE', 'Swedish'), ('th_TH', 'Thai'), ('tr_TR', 'Turkish'), ('uk_UA', 'Ukranian'), )) # Workflow's bundle IDs BUNDLE_ID = os.getenv('alfred_workflow_bundleid') # Script Filter keyword KEYWORD = os.getenv('keyword') # AppleScript to run an Alfred search SEARCH_AS = u'tell application "Alfred 3" to search "{query}"' def boolvar(name, default=False): """Return `True` or `False` for a workflow variable.""" v = os.getenv(name) if v is not None: if v.lower() in ('1', 'on', 'yes'): return True if v.lower() in ('0', 'off', 'no'): return False log.debug('no value set for workflow variable "%s", ' 'using default: %r', name, default) return default def intvar(name, default=0): """Return `int` for a workflow variable.""" v = os.getenv(name) if v is not None: try: v = int(v) except ValueError: log.error('bad value for "%s": "%s" is not a number', name, v) return default return v log.debug('no value set for workflow variable "%s", ' 'using default: %r', name, default) return default def notify(title, text=''): """Show a notification.""" if not boolvar('SHOW_NOTIFICATIONS'): 
return v = Variables(title=title, text=text) print(v)
Python
0.000027
@@ -2967,110 +2967,8 @@ ')%0A%0A -# AppleScript to run an Alfred search%0ASEARCH_AS = u'tell application %22Alfred 3%22 to search %22%7Bquery%7D%22'%0A%0A %0Adef
96479e7a883af913a3c13bd73f26f0aa49c63621
Create target directory.
wikiconv/conversation_reconstruction/construct_utils/reconstruct_conversation.py
wikiconv/conversation_reconstruction/construct_utils/reconstruct_conversation.py
import apache_beam as beam import logging import copy import json import tempfile import resource import os import shutil from google.cloud import storage from construct_utils.conversation_constructor import Conversation_Constructor class ReconstructConversation(beam.DoFn): def merge(self, ps1, ps2): # Merge two page states, ps1 is the later one deleted_ids_ps2 = {d[1]:d for d in ps2['deleted_comments']} deleted_ids_ps1 = {d[1]:d for d in ps1['deleted_comments']} deleted_ids_ps2.update(deleted_ids_ps1) extra_ids = [key for key in deleted_ids_ps2.keys() if key not in deleted_ids_ps1] ret_p = copy.deepcopy(ps1) ret_p['deleted_comments'] = list(deleted_ids_ps2.values()) conv_ids = ps2['conversation_id'] auth = ps2['authors'] ret_p['conversation_id'] = ret_p['conversation_id'] ret_p['authors'] = ret_p['authors'] for i in extra_ids: ret_p['conversation_id'][i] = conv_ids[i] ret_p['authors'][i] = auth[i] ret_p['conversation_id'] = ret_p['conversation_id'] ret_p['authors'] = ret_p['authors'] return ret_p def process(self, info, tmp_input): """ Args: bucket: a cloud storage bucket. tmp_input: a path to copy JSON revision files from. This allows data to be copied to this local machine's disk for external sorting (when there are more revisions than can fit in memory). """ LOG_INTERVAL = 100 # The max memory used in of this process KB, before warning are logged. MEMORY_THERESHOLD = 1000000 (page_id, data) = info if (page_id == None): return logging.info('USERLOG: Reconstruction work start on page: %s' % page_id) # Load input from cloud last_revision = data['last_revision'] page_state = data['page_state'] error_log = data['error_log'] # Clean type formatting if last_revision != []: assert(len(last_revision) == 1) last_revision = last_revision[0] else: last_revision = None if page_state != []: assert(len(page_state) == 1) page_state = page_state[0] page_state['page_state']['actions'] = \ {int(pos) : tuple(val) for pos, val in page_state['page_state']['actions'].iteritems()} page_state['authors'] = \ {action_id: [tuple(author) for author in authors] \ for action_id, authors in page_state['authors'].iteritems()} else: page_state = None if error_log != []: assert(len(error_log) == 1) error_log = error_log[0] else: error_log = None rev_ids = [] rev_ids = data['to_be_processed'] # Return when the page doesn't have updates to be processed if len(rev_ids) == 0 or (error_log and error_log['rev_id'] <= min(r['rev_id'] for r in rev_ids)): assert((last_revision and page_state) or \ ((last_revision is None) and (page_state is None))) if last_revision: yield beam.pvalue.TaggedOutput('last_revision', json.dumps(last_revision)) yield beam.pvalue.TaggedOutput('page_states', json.dumps(page_state)) if error_log: yield beam.pvalue.TaggedOutput('error_log', json.dumps(error_log)) logging.info('Page %s has no sufficient input in this time period.' % (page_id)) return processor = Conversation_Constructor() if page_state: logging.info('Page %s existed: loading page state.' % (page_id)) # Load previous page state. processor.load(page_state['deleted_comments']) latest_content = last_revision['text'] else: latest_content = "" # Initialize last_revision_id = 'None' page_state_bak = None cnt = 0 # Sort revisions by temporal order in memory. revision_lst = sorted(rev_ids, key=lambda x: (x['timestamp'], x['rev_id'])) last_loading = 0 logging.info('Reconstruction on page %s started.' 
% (page_id)) if 'text' not in revision_lst[0]: tempfile_path = tempfile.mkdtemp() if tmp_input.startswith('gs://'): storage_client = storage.Client() bucket_name_end = tmp_input.find('/', 5) bucket = storage_client.get_bucket(tmp_input[5:bucket_name_end]) prefix = os.path.join(tmp_input[bucket_name_end+1:], page_id) for blob in bucket.list_blobs(delimiter='/', prefix=prefix): suffix = os.path.basename(blob.name) blob.download_to_filename(os.path.join(tempfile_path, page_id, suffix)) else: shutil.copytree(os.path.join(tmp_input, page_id), os.path.join(tempfile_path, page_id)) for key in revision_lst: if 'text' not in key: with open(os.path.join(tempfile_path, page_id, str(key['rev_id'])), 'r') as f: revision = json.load(f) os.remove(os.path.join(tempfile_path, page_id, str(key['rev_id']))) else: revision = key revision['rev_id'] = int(revision['rev_id']) # Process revision by revision. if 'rev_id' not in revision: continue cnt += 1 last_revision_id = revision['rev_id'] if revision['text'] == None: revision['text'] = "" logging.debug("REVISION CONTENT: %s" % revision['text']) try: page_state, actions, latest_content = \ processor.process(page_state, latest_content, revision) except AssertionError: yield beam.pvalue.TaggedOutput('error_log', \ json.dumps({'page_id': page_id, 'rev_id': last_revision_id})) break for action in actions: yield json.dumps(action) memory_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss if memory_used >= MEMORY_THERESHOLD: logging.warn("MEMORY USED MORE THAN THERESHOLD in PAGE %s REVISION %d : %d KB" % (revision['page_id'], revision['rev_id'], memory_used)) if (cnt % LOG_INTERVAL == 0 and cnt) and page_state: # Reload after every LOG_INTERVAL revisions to keep the low memory # usage. processor = Conversation_Constructor() page_state_bak = copy.deepcopy(page_state) last_loading = cnt processor.load(page_state['deleted_comments']) page_state['deleted_comments'] = [] revision = None if page_state_bak and cnt != last_loading: # Merge the last two page states if a reload happens while processing, # otherwise in a situation where a week's data contains LOG_INTERVAL + 1 # revisions, the page state may only contain data from one revision. page_state = self.merge(page_state, page_state_bak) if error_log: yield beam.pvalue.TaggedOutput('error_log', json.dumps(error_log)) yield beam.pvalue.TaggedOutput('page_states', json.dumps(page_state)) yield beam.pvalue.TaggedOutput('last_revision', json.dumps( {'page_id': page_id, 'text': latest_content})) logging.info('USERLOG: Reconstruction on page %s complete! last revision: %s' % (page_id, last_revision_id))
Python
0
@@ -4321,24 +4321,65 @@ %5D, page_id)%0A + os.mkdir(tempfile_path, page_id)%0A for
c9277fa65afcf513c2e3000193d7837900ff8ee1
Improve logging runtime state poll fail message
src/nodeconductor_openstack/tasks/base.py
src/nodeconductor_openstack/tasks/base.py
from celery import shared_task from nodeconductor.core.tasks import Task from .. import models # TODO: move this signal to itacloud assembly application @shared_task def register_instance_in_zabbix(instance_uuid): from nodeconductor.template.zabbix import register_instance instance = models.Instance.objects.get(uuid=instance_uuid) register_instance(instance) class RuntimeStateException(Exception): pass class PollRuntimeStateTask(Task): max_retries = 300 default_retry_delay = 5 def get_backend(self, instance): return instance.get_backend() def execute(self, instance, backend_pull_method, success_state, erred_state): backend = self.get_backend(instance) getattr(backend, backend_pull_method)(instance) instance.refresh_from_db() if instance.runtime_state not in (success_state, erred_state): self.retry() elif instance.runtime_state == erred_state: raise RuntimeStateException( 'Instance %s (PK: %s) runtime state become erred: %s' % (instance, instance.pk, erred_state)) return instance class PollBackendCheckTask(Task): max_retries = 60 default_retry_delay = 5 def get_backend(self, instance): return instance.get_backend() def execute(self, instance, backend_check_method): # backend_check_method should return True if object does not exist at backend backend = self.get_backend(instance) if not getattr(backend, backend_check_method)(instance): self.retry() return instance
Python
0.000001
@@ -1008,24 +1008,18 @@ ' -Instance +%25s %25s (PK: @@ -1058,16 +1058,66 @@ %25s' %25 ( +%0A instance.__class__.__name__, instance
f165ddf15914bd9b1c3720e99b3171fb73d331a3
kNN remove l5 distance
ParamSklearn/components/classification/k_nearest_neighbors.py
ParamSklearn/components/classification/k_nearest_neighbors.py
import sklearn.neighbors from HPOlibConfigSpace.configuration_space import ConfigurationSpace from HPOlibConfigSpace.hyperparameters import CategoricalHyperparameter, \ Constant, UnParametrizedHyperparameter, UniformIntegerHyperparameter from HPOlibConfigSpace.conditions import EqualsCondition from ParamSklearn.components.classification_base import ParamSklearnClassificationAlgorithm from ParamSklearn.util import DENSE, SPARSE, PREDICTIONS class KNearestNeighborsClassifier(ParamSklearnClassificationAlgorithm): def __init__(self, n_neighbors, weights, algorithm='auto', p=2, leaf_size=30, random_state=None): self.n_neighbors = int(n_neighbors) if weights not in ("uniform", "distance"): raise ValueError("'weights' should be in ('uniform', 'distance'): " "%s" % weights) self.weights = weights #if metric not in ("euclidean", "manhattan", "chebyshev", "minkowski"): # raise ValueError("'metric' should be in ('euclidean', # 'chebyshev', " # "'manhattan', 'minkowski'): %s" % metric) #self.metric = metric self.algorithm = algorithm self.p = int(p) self.leaf_size = int(leaf_size) self.random_state = random_state def fit(self, X, Y): self.estimator = \ sklearn.neighbors.KNeighborsClassifier( n_neighbors=self.n_neighbors, weights=self.weights, p=self.p, algorithm=self.algorithm, leaf_size=self.leaf_size) self.estimator.fit(X, Y) return self def predict(self, X): if self.estimator is None: raise NotImplementedError() return self.estimator.predict(X) def predict_proba(self, X): if self.estimator is None: raise NotImplementedError() return self.estimator.predict_proba(X) @staticmethod def get_properties(): return {'shortname': 'KNN', 'name': 'K-Nearest Neighbor Classification', 'handles_missing_values': False, 'handles_nominal_values': False, 'handles_numerical_features': True, 'prefers_data_scaled': True, # Find out if this is good because of sparsity 'prefers_data_normalized': False, 'handles_regression': False, 'handles_classification': True, 'handles_multiclass': True, 'handles_multilabel': False, 'is_deterministic': True, 'handles_sparse': True, 'input': (DENSE, SPARSE), 'output': PREDICTIONS, # TODO find out what is best used here! 'preferred_dtype' : None} @staticmethod def get_hyperparameter_search_space(dataset_properties=None): n_neighbors = UniformIntegerHyperparameter( name="n_neighbors", lower=1, upper=100, default=1) weights = CategoricalHyperparameter( name="weights", choices=["uniform", "distance"], default="uniform") algorithm = Constant(name='algorithm', value="auto") if dataset_properties is not None and dataset_properties.get('sparse'): p_choices = [1, 2] else: p_choices = [1, 2, 5] p = CategoricalHyperparameter( name="p", choices=p_choices, default=2) leaf_size = Constant(name="leaf_size", value=30) # Unparametrized # TODO: If we further parametrize 'metric' we need more metric params #metric = UnParametrizedHyperparameter(name="metric", value="minkowski") cs = ConfigurationSpace() cs.add_hyperparameter(n_neighbors) cs.add_hyperparameter(weights) #cs.add_hyperparameter(metric) cs.add_hyperparameter(algorithm) cs.add_hyperparameter(p) cs.add_hyperparameter(leaf_size) # Conditions #metric_p = EqualsCondition(parent=metric, child=p, value="minkowski") #cs.add_condition(metric_p) return cs
Python
0.999757
@@ -3204,167 +3204,8 @@ o%22)%0A - if dataset_properties is not None and dataset_properties.get('sparse'):%0A p_choices = %5B1, 2%5D%0A else:%0A p_choices = %5B1, 2, 5%5D%0A @@ -3273,17 +3273,14 @@ ces= -p_choices +%5B1, 2%5D , de @@ -3350,193 +3350,8 @@ 0)%0A%0A - # Unparametrized%0A # TODO: If we further parametrize 'metric' we need more metric params%0A #metric = UnParametrizedHyperparameter(name=%22metric%22, value=%22minkowski%22)%0A%0A
2474e584b228c45bba01e97ef8c23185b7b1d31c
Update j2cl_library so matched by open-source replacement
build_def/j2cl_library.bzl
build_def/j2cl_library.bzl
"""j2cl_library build macro Takes Java source, translates it into Closure style JS and surfaces it to the rest of the build tree with a js_common.provider. Generally library rules dep on other library rules for reference resolution and this build macro is no exception. In particular the deps this rule needs for reference resolution are java_library() targets which will have been created by other invocations of this same j2cl_library build macro. Example use: # Effectively creates js_library(name="Foo") containing translated JS. j2cl_library( name = "Foo", srcs = glob(["Foo.java"]), deps = [":Bar"] # Directly depends on j2cl_library(name="Bar") ) # Effectively creates js_library(name="Bar") containing the results. j2cl_library( name = "Bar", srcs = glob(["Bar.java"]), ) """ load("//build_def:j2cl_java_library.bzl", j2cl_library_rule = "j2cl_library") load("//build_def:j2cl_library_build_test.bzl", "build_test") def j2cl_library( name, native_srcs = [], generate_build_test = None, _js_srcs = [], _js_deps = [], _js_exports = [], **kwargs): """Translates Java source into JS source in a js_common.provider target. See j2cl_java_library.bzl#j2cl_library for the arguments. Implicit output targets: lib<name>.jar: A java archive containing the byte code. lib<name>-src.jar: A java archive containing the sources (source jar). Args: srcs: Source files (.java or .srcjar) to compile. native_srcs: Native js source files (.native.js). Native sources should be put next to main java file to match. deps: Labels of other j2cl_library() rules. NOT labels of java_library() rules. """ # Private Args: # _js_srcs: JavaScript source files (.js) to include in the bundle. # _js_deps: Direct JavaScript dependencies needed by native code (either # via srcs in _js_srcs or via JsInterop/native.js). # For the JsInterop scenario, we encourage developers to create # proper JsInterop stubs next to the js_library rule and create a # j2cl_import rule there. # _js_exports: Exported JavaScript dependencies. args = dict(kwargs) _append(args, "srcs", native_srcs) _append(args, "srcs", _js_srcs) _append(args, "deps", _js_deps) _append(args, "exports", _js_exports) hidden_arg_names = [i for i in args if i.startswith("_")] for arg_name in hidden_arg_names: args[arg_name[1:]] = args.pop(arg_name) # If this is JRE itself, don't synthesize the JRE dep. target_name = native.package_name() + ":" + name if args["srcs"] and target_name != "third_party/java_src/j2cl/jre/java:jre": args["deps"].append("//internal_do_not_use:jre") # TODO(goktug): remove workaround after b/71772385 is fixed dummy_class_name = name.replace("-", "__") dummy_src = dummy_class_name + "_gen" native.genrule( name = dummy_src, outs = ["dummy_/%s/package-info.java" % dummy_class_name], cmd = "echo \"package dummy_;\" > $@", ) j2cl_library_rule( name = name, srcs_hack = [":" + dummy_src], **args ) if args["srcs"] and (generate_build_test == None or generate_build_test): build_test(name, kwargs.get("tags", [])) def _append(args, name, value): # TODO(goktug): Remove list() coercions after cleaning the callsites w/ depsets since it is # slotted for deprecation in favor of explicit to_list calls. args[name] = list(args.get(name) or []) + list(value or [])
Python
0
@@ -2621,16 +2621,23 @@ t_name = + %22//%22 + native. @@ -2708,33 +2708,9 @@ != %22 -third_party/java_src/j2cl +/ /jre
e3035fb91a96a3ff5627b6847203e3dc11fbc78f
Add libunwind-1.2.1 (#8145)
var/spack/repos/builtin/packages/libunwind/package.py
var/spack/repos/builtin/packages/libunwind/package.py
############################################################################## # Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, [email protected], All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Libunwind(AutotoolsPackage): """A portable and efficient C programming interface (API) to determine the call-chain of a program.""" homepage = "http://www.nongnu.org/libunwind/" url = "http://download.savannah.gnu.org/releases/libunwind/libunwind-1.1.tar.gz" version('1.1', 'fb4ea2f6fbbe45bf032cd36e586883ce')
Python
0.000002
@@ -1540,16 +1540,73 @@ ar.gz%22%0A%0A + version('1.2.1', '06ba9e60d92fd6f55cd9dadb084df19e')%0A vers
77db7bb96686e3308a4061d24c257466d9987405
add delete_project dashboard api.
rio/blueprints/dashboard.py
rio/blueprints/dashboard.py
# -*- coding: utf-8 -*- from slugify import slugify from flask import Blueprint from flask import jsonify from flask_wtf import Form from wtforms import StringField from wtforms.validators import DataRequired from wtforms.validators import ValidationError from wtforms.validators import Length from rio.utils.user import get_current_user_id from rio.utils.user import login_required from rio.utils.slugify import slugify from rio.models import add_instance from rio.models import get_data_or_404 bp = Blueprint('dashboard', __name__) class NewProjectForm(Form): name = StringField('Name', validators=[DataRequired(), Length(max=64)]) class ConfirmDeleteProjectForm(Form): name = StringField('Name', validators=[DataRequired(), Length(max=64)]) @bp.errorhandler(404) def handle_not_found(exception): return jsonify(message='not found'), 404 @bp.route('/projects/new', methods=['POST']) @login_required def new_project(): """New Project.""" form = NewProjectForm() if not form.validate_on_submit(): return jsonify(errors=form.errors), 400 data = form.data data['slug'] = slugify(data['name']) data['owner_id'] = get_current_user_id() id = add_instance('project', **data) if not id: return jsonify(errors={'name': ['duplicated slug.']}), 400 project = get_data_or_404('project', id) return jsonify(**project) @bp.route('/projects/<int:project_id>', methods=['DELETE']) @login_required def delete_project(project_id): project = get_data_or_404('project', project_id) if project['owner_id'] != get_current_user_id(): return jsonify(message='forbidden'), 403 # TODO: implement delete_project task = delete_project.delay(project_id) return jsonify() @bp.route('/projects/<int:project_id>/transfer', methods=['POST']) def transfer_project(project_id): pass
Python
0
@@ -452,16 +452,56 @@ nstance%0A +from rio.models import delete_instance%0A from rio @@ -1526,24 +1526,50 @@ roject_id):%0A + %22%22%22Delete Project.%22%22%22%0A project @@ -1721,73 +1721,35 @@ -# TODO: implement delete_project%0A task = delete_project.delay( +delete_instance('project', proj @@ -1776,16 +1776,18 @@ jsonify( +%7B%7D )%0A%[email protected]
91a77b860387ebed146b9e4e604d007bfabf0b9e
Fix potential bug in parameter passing
lib/ansible/plugins/action/normal.py
lib/ansible/plugins/action/normal.py
# (c) 2012, Michael DeHaan <[email protected]> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from __future__ import (absolute_import, division, print_function) __metaclass__ = type from ansible.plugins.action import ActionBase class ActionModule(ActionBase): def run(self, tmp=None, task_vars=dict()): results = self._execute_module(tmp, task_vars=task_vars) # Remove special fields from the result, which can only be set # internally by the executor engine. We do this only here in # the 'normal' action, as other action plugins may set this. for field in ('ansible_notify',): if field in results: results.pop(field) return results
Python
0.000001
@@ -960,16 +960,20 @@ dule(tmp +=tmp , task_v
e0db9a970c6ea778419cc1f20ca66adedffb7aae
Set HOME, allow errors to pass through to stdout/stderr
utils/mwm.py
utils/mwm.py
# -*- coding: utf-8 -*- from __future__ import absolute_import import logging import os import shutil import subprocess import tempfile from string import Template from .artifact import Artifact LOG = logging.getLogger(__name__) class MWM(object): name = 'mwm' description = 'maps.me MWM' cmd = Template('generate_mwm.sh $input') def __init__(self, input): """ Initialize the MWM generation utility. Args: pbf: the source PBF """ self.input = input self.output = os.path.splitext(input)[0] + '.mwm' def run(self): if self.is_complete: LOG.debug("Skipping MWM, file exists") return convert_cmd = self.cmd.safe_substitute({ 'input': self.input, }) LOG.debug('Running: %s' % convert_cmd) tmpdir = tempfile.mkdtemp() env = os.environ.copy() env.update(MWM_WRITABLE_DIR=tmpdir, TARGET=os.path.dirname(self.output)) try: subprocess.check_call( convert_cmd, env=env, shell=True, executable='/bin/bash', stdout=subprocess.PIPE, stderr=subprocess.PIPE) LOG.debug('generate_mwm.sh complete') finally: shutil.rmtree(tmpdir) @property def results(self): return [Artifact([self.output], self.name)] @property def is_complete(self): return os.path.isfile(self.output)
Python
0
@@ -924,16 +924,29 @@ .update( +HOME=tmpdir, MWM_WRIT @@ -1172,88 +1172,8 @@ ash' -,%0A stdout=subprocess.PIPE,%0A stderr=subprocess.PIPE )%0A%0A
bbcd5e00a4dcd991b9699ef6ae19339325bff7fd
Clean history bug resolved For a large data using batch size was not filtering properly. So updated the batch logic LEARNER-2697
ecommerce/core/management/commands/clean_history.py
ecommerce/core/management/commands/clean_history.py
from __future__ import unicode_literals import logging import time from dateutil.parser import parse from django.core.management.base import BaseCommand, CommandError from django.db import transaction from oscar.core.loading import get_model from ecommerce.courses.models import Course from ecommerce.invoice.models import Invoice logger = logging.getLogger(__name__) Order = get_model('order', 'Order') OrderLine = get_model('order', 'Line') Product = get_model('catalogue', 'Product') ProductAttributeValue = get_model('catalogue', 'ProductAttributeValue') Refund = get_model('refund', 'Refund') RefundLine = get_model('refund', 'RefundLine') StockRecord = get_model('partner', 'StockRecord') class Command(BaseCommand): help = 'Clean history data' def add_arguments(self, parser): parser.add_argument('--cutoff_date', action='store', dest='cutoff_date', type=str, required=True, help='Cutoff date before which the history data should be cleaned. ' 'format is YYYY-MM-DD') parser.add_argument('--batch_size', action='store', dest='batch_size', type=int, default=1000, help='Maximum number of database rows to delete per query. ' 'This helps avoid locking the database when deleting large amounts of data.') parser.add_argument('--sleep_time', action='store', dest='sleep_time', type=int, default=10, help='Sleep time between deletion of batches') def handle(self, *args, **options): cutoff_date = options['cutoff_date'] batch_size = options['batch_size'] sleep_time = options['sleep_time'] try: cutoff_date = parse(cutoff_date) except: # pylint: disable=bare-except msg = 'Failed to parse cutoff date: {}'.format(cutoff_date) logger.exception(msg) raise CommandError(msg) models = ( Order, OrderLine, Refund, RefundLine, ProductAttributeValue, Product, StockRecord, Course, Invoice, ) for model in models: qs = model.history.filter(history_date__lte=cutoff_date) message = 'Cleaning {} rows from {} table'.format(qs.count(), model.__name__) logger.info(message) qs = qs[:batch_size] while qs.exists(): history_batch = list(qs.values_list('id', flat=True)) with transaction.atomic(): model.history.filter(pk__in=history_batch).delete() logger.info( 'Deleted instances of %s with PKs between %d and %d', model.__name__, history_batch[0], history_batch[-1] ) time.sleep(sleep_time) qs = model.history.filter(history_date__lte=cutoff_date)[:batch_size]
Python
0
@@ -2527,16 +2527,32 @@ ff_date) +.order_by('-pk') %0A%0A @@ -2685,172 +2685,417 @@ -qs = qs%5B:batch_size%5D%0A while qs.exists():%0A history_batch = list(qs.values_list('id', flat=True))%0A%0A with transaction.atomic() +try:%0A # use Primary keys sorting to make sure unique batching as%0A # filtering batch does not work for huge data%0A max_pk = qs%5B0%5D.pk%0A batch_start = qs.reverse()%5B0%5D.pk%0A batch_stop = batch_start + batch_size%0A except IndexError:%0A continue%0A%0A logger.info(message)%0A%0A while batch_start %3C= max_pk :%0A @@ -3104,27 +3104,34 @@ - +queryset = model.histo @@ -3148,25 +3148,116 @@ pk__ -in=history_batch) +gte=batch_start, pk__lt=batch_stop)%0A%0A with transaction.atomic():%0A queryset .del @@ -3417,43 +3417,31 @@ __, -history_batch%5B0%5D, history_batch%5B-1%5D +batch_start, batch_stop %0A @@ -3452,32 +3452,76 @@ )%0A%0A + if batch_stop %3C max_pk:%0A @@ -3543,17 +3543,17 @@ p_time)%0A - +%0A @@ -3563,66 +3563,64 @@ -qs = model.history.filter(history_date__lte=cutoff_date)%5B: + batch_start = batch_stop%0A batch_stop += batc @@ -3625,10 +3625,9 @@ tch_size -%5D %0A
33da474861334d361f3e990eda2518f919158726
Fix reading from socket for Python 3 (PY-15772).
python/helpers/profiler/prof_io.py
python/helpers/profiler/prof_io.py
import traceback from _prof_imports import TSerialization from _prof_imports import TJSONProtocol from _prof_imports import ProfilerRequest from _prof_imports import IS_PY3K from prof_util import ProfDaemonThread import struct def send_message(sock, message): """ Send a serialized message (protobuf Message interface) to a socket, prepended by its length packed in 4 bytes (big endian). """ s = TSerialization.serialize(message, TJSONProtocol.TJSONProtocolFactory()) packed_len = struct.pack('>L', len(s)) sock.sendall(packed_len + s) def get_message(sock, msgtype): """ Read a message from a socket. msgtype is a subclass of of protobuf Message. """ len_buf = socket_read_n(sock, 4) msg_len = struct.unpack('>L', len_buf)[0] msg_buf = socket_read_n(sock, msg_len) msg = msgtype() TSerialization.deserialize(msg, msg_buf, TJSONProtocol.TJSONProtocolFactory()) return msg def socket_read_n(sock, n): """ Read exactly n bytes from the socket. Raise RuntimeError if the connection closed before n bytes were read. """ buf = '' if IS_PY3K: buf = bytearray() while n > 0: data = sock.recv(n) if data == '': raise RuntimeError('unexpected connection close') buf += data n -= len(data) return buf class ProfWriter(object): """ writer thread writes out the commands in an infinite loop """ def __init__(self, sock): self.sock = sock def addCommand(self, message): send_message(self.sock, message) class ProfReader(ProfDaemonThread): """ reader thread reads and dispatches commands in an infinite loop """ def __init__(self, sock, message_processor): ProfDaemonThread.__init__(self) self.sock = sock self.processor = message_processor self.setName("profiler.Reader") def OnRun(self): try: while not self.killReceived: try: message = get_message(self.sock, ProfilerRequest) except: traceback.print_exc() return # Finished communication. try: self.processor.process(message) except: traceback.print_exc() except: traceback.print_exc()
Python
0
@@ -1124,21 +1124,8 @@ %22%22%22%0A - buf = ''%0A @@ -1161,16 +1161,43 @@ earray() +%0A else:%0A buf = '' %0A%0A wh
1a00a2aaa2ce73f723a7555cad34dd5742677b3a
test name duplicated
python/intercoop/apiclient_test.py
python/intercoop/apiclient_test.py
# -*- encoding: utf-8 -*- import unittest from . import apiclient from . import packaging from . import crypto import requests_mock from yamlns import namespace as ns class ApiClient_Test(unittest.TestCase): yaml=u"""\ originpeer: somillusio origincode: 666 name: Perico de los Palotes address: Percebe, 13 city: Villarriba del Alcornoque state: Albacete postalcode: '01001' country: ES """ def setUp(self): self.keyfile = 'testkey.pem' self.key = crypto.loadKey(self.keyfile) self.personalData = ns.loads(self.yaml) self.apiurl = "https://api.somacme.coop/intercoop" self.service = "contract" self.uuid = '01020304-0506-0708-090a-0b0c0d0e0f10' self.continuationUrl = 'https://somacme.coop/contract?token={}'.format( self.uuid) self.client = apiclient.ApiClient( apiurl=self.apiurl, key=self.key, ) def respondToPost(self, status, text=None): text = text or ns( continuationUrl = self.continuationUrl ).dump() m = requests_mock.mock() m.post( self.apiurl+'/activateService', text = text, status_code = status, ) return m def test_activateService_receivesUrl(self): with self.respondToPost(200) as m: url=self.client.activateService( service=self.service, personalData=self.personalData, ) self.assertEqual(url,self.continuationUrl) def test_activateService_sendsPackage(self): with self.respondToPost(200) as m: url=self.client.activateService( service=self.service, personalData=self.personalData, ) self.assertEqual([ (h.method, h.url, h.text) for h in m.request_history], [ ('POST', 'https://api.somacme.coop/intercoop/activateService', u"intercoopVersion: '1.0'\n" u"payload: b3JpZ2lucGVlcjogc29taWxsdXNpbwpvcmlnaW5jb2RlOiA2NjYKbmFtZTogUGVyaWNvIGRlIGxvcyBQYWxvdGVzCmFkZHJlc3M6IFBlcmNlYmUsIDEzCmNpdHk6IFZpbGxhcnJpYmEgZGVsIEFsY29ybm9xdWUKc3RhdGU6IEFsYmFjZXRlCnBvc3RhbGNvZGU6ICcwMTAwMScKY291bnRyeTogRVMK\n" u"signature: 2PXEJSmGwaIZY4XgWZYcmh8qexmGe-Ve7p45kLmtia5wO4CXrbx3BiCeFMvbi9eiGazOg-Cy9ktdR3SEYuZlwlkPpQ-C2QrVY2c6o1PKNNLFJoJIYkfnIDwTdtlY5qsxbC-kKbWO2WtnhCeBnBNKOwz9-lbIlrLYo470MjuTLheVmoXuyTHp1hOjHDDn2e38kJT-miNtr4knDn-uMYCXdAx3eIGTBOTQ8wGFz55JR_jluZKIN8wEgJQWAHVMY1FbtsutESRqJ_TMLbCbqe0llxWppxgF20XyzleSxTV6v_I2GZyfEWlYlFnOkk5TEjqkk5vZOFGXra2J3Cabzn9QFQ==\n" ) ]) def test_activateService_receivesUrl(self): error = ns() error.type='BadPeer' error.message="The entity 'badpeer' is not a recognized one" error.arguments=['badpeer'] with self.respondToPost(403,error.dump()) as m: with self.assertRaises(packaging.BadPeer) as ctx: url=self.client.activateService( service=self.service, personalData=self.personalData, ) self.assertEqual(str(ctx.exception), "The entity 'badpeer' is not a recognized one", ) # vim: ts=4 sw=4 et
Python
0.000626
@@ -914,17 +914,16 @@ )%0A%0A -%0A def @@ -2675,17 +2675,16 @@ %5D)%0A%0A -%0A def @@ -2700,35 +2700,31 @@ vateService_ -receivesUrl +badPeer (self):%0A
e38407cb7e5fae9f7f3b6750d791175bd134c92e
fix adding host on ubuntu Reviewed-by:frank
python/lib/cloudutils/utilities.py
python/lib/cloudutils/utilities.py
from cloudException import CloudRuntimeException, formatExceptionInfo import logging from subprocess import PIPE, Popen from signal import alarm, signal, SIGALRM, SIGKILL import sys import os class bash: def __init__(self, args, timeout=600): self.args = args logging.debug("execute:%s"%args) self.timeout = timeout self.process = None self.success = False self.run() def run(self): class Alarm(Exception): pass def alarm_handler(signum, frame): raise Alarm try: self.process = Popen(self.args, shell=True, stdout=PIPE, stderr=PIPE) if self.timeout != -1: signal(SIGALRM, alarm_handler) alarm(self.timeout) try: self.stdout, self.stderr = self.process.communicate() if self.timeout != -1: alarm(0) except Alarm: os.kill(self.process.pid, SIGKILL) raise CloudRuntimeException("Timeout during command execution") self.success = self.process.returncode == 0 except: raise CloudRuntimeException(formatExceptionInfo()) if not self.success: logging.debug("Failed to execute:" + self.getErrMsg()) def isSuccess(self): return self.success def getStdout(self): return self.stdout.strip("\n") def getLines(self): return self.stdout.split("\n") def getStderr(self): return self.stderr.strip("\n") def getErrMsg(self): if self.isSuccess(): return "" if self.getStderr() is None or self.getStderr() == "": return self.getStdout() else: return self.getStderr() def initLoging(logFile=None): try: if logFile is None: logging.basicConfig(level=logging.DEBUG) else: logging.basicConfig(filename=logFile, level=logging.DEBUG) except: logging.basicConfig(level=logging.DEBUG) def writeProgressBar(msg, result): output = "[%-6s]\n"%"Failed" if msg is not None: output = "%-30s"%msg elif result is True: output = "[%-2s]\n"%"OK" elif result is False: output = "[%-6s]\n"%"Failed" sys.stdout.write(output) sys.stdout.flush() class Distribution: def __init__(self): self.distro = "Unknown" self.release = "Unknown" if os.path.exists("/etc/fedora-release"): self.distro = "Fedora" elif os.path.exists("/etc/redhat-release"): version = file("/etc/redhat-release").readline() if version.find("Red Hat Enterprise Linux Server release 6") != -1 or version.find("Scientific Linux release 6") != -1 or version.find("CentOS Linux release 6") != -1: self.distro = "RHEL6" elif version.find("CentOS release") != -1: self.distro = "CentOS" else: self.distro = "RHEL5" elif os.path.exists("/etc/legal") and "Ubuntu" in file("/etc/legal").read(-1): self.distro = "Ubuntu" kernel = bash("uname -r").getStdout() if kernel.find("2.6.32") != -1: self.release = "10.04" self.arch = bash("uname -m").getStdout() else: self.distro = "Unknown" def getVersion(self): return self.distro def getRelease(self): return self.release def getArch(self): return self.arch class serviceOps: pass class serviceOpsRedhat(serviceOps): def isServiceRunning(self, servicename): try: o = bash("service " + servicename + " status") if "running" in o.getStdout() or "start" in o.getStdout() or "Running" in o.getStdout(): return True else: return False except: return False def stopService(self, servicename,force=False): if self.isServiceRunning(servicename) or force: return bash("service " + servicename +" stop").isSuccess() return True def disableService(self, servicename): result = self.stopService(servicename) bash("chkconfig --del " + servicename) return result def startService(self, servicename,force=False): if not self.isServiceRunning(servicename) or force: return bash("service " + servicename + " start").isSuccess() return True def enableService(self, servicename,forcestart=False): bash("chkconfig 
--level 2345 " + servicename + " on") return self.startService(servicename,force=forcestart) def isKVMEnabled(self): if os.path.exists("/dev/kvm"): return True else: return False class serviceOpsUbuntu(serviceOps): def isServiceRunning(self, servicename): try: o = bash("sudo /usr/sbin/service " + servicename + " status") if "not running" in o.getStdout(): return False else: return True except: return False def stopService(self, servicename,force=False): if self.isServiceRunning(servicename) or force: return bash("sudo /usr/sbin/service " + servicename +" stop").isSuccess() def disableService(self, servicename): result = self.stopService(servicename) bash("sudo update-rc.d -f " + servicename + " remove") return result def startService(self, servicename,force=False): if not self.isServiceRunning(servicename) or force: return bash("sudo /usr/sbin/service " + servicename + " start").isSuccess() def enableService(self, servicename,forcestart=False): bash("sudo update-rc.d -f " + servicename + " remove") bash("sudo update-rc.d -f " + servicename + " defaults") return self.startService(servicename,force=forcestart) def isKVMEnabled(self): return bash("kvm-ok").isSuccess()
Python
0
@@ -5264,36 +5264,35 @@ rvicename,force= -Fals +Tru e):%0A if s @@ -5639,36 +5639,35 @@ rvicename,force= -Fals +Tru e):%0A if n @@ -5846,36 +5846,35 @@ name,forcestart= -Fals +Tru e):%0A bash
8c43729c4be33a6758d3ade9cc745e757c261c4f
fix variable name
Baraffe_tables/BDmass_to_flux_ratio.py
Baraffe_tables/BDmass_to_flux_ratio.py
#!/usr/bin/python """Brown Dwarf Flux ratio calculator. Calculates the flux/contrast ratio between a host star and a brown dwarf of a specified mass. This script uses the SIMBAD database to obtain the host star parameters, such as magnitude and age. The companion/brown dwarf mass is a given input (in Mjup) and is used to obtain the band magnitudes of the companion from the Barraffe tables. The magnitude difference between the host and companion are used to caluate the flux/contrast ratio. Inputs ------ Star name: str Stellar idenification number. eg. HD30501 companion_mass: float Mass of companion in Jupiter masses age: float Stellar Age. (Closest model is used) band: list of str Spectral bands to obtain ratio. model: str Choose between the 2003 and 2015 Barraffe modeling. """ #TODO: Interpolate between tables? from __future__ import division, print_function import argparse import numpy as np from db_queries import get_stellar_params from calculations import calculate_flux_ratios, calculate_stellar_radius from table_search import mass_table_search # import matplotlib.pyplot as plt def _parser(): """Take care of all the argparse stuff. :returns: the args """ parser = argparse.ArgumentParser(description='Determine flux ratio of stellar companion') parser.add_argument('star_name', help='Input fits file to calibrate') parser.add_argument('companion_mass', help='Mass of companion (M_Jup)', type=float) parser.add_argument('age', help='Star age (Gyr)', type=float) parser.add_argument('-b', '--band', help='Spectral Band to measure. Options=["All", "K", ""]', choices=["All", "J", "H", "K"], default=["All"], nargs="+", type=str) parser.add_argument('-m', '--model', choices=['03', '15', '2003', '2015'], help='Baraffe model to use [2003, 2015]', default='2003', type=str) parser.add_argument("-a", "--area_ratio", help="Calculate the area ratio.", default=False, action="store_true") args = parser.parse_args() return args def main(star_name, companion_mass, stellar_age, band=["All"], model="2003", area_ratio=False): """Compute flux ratio of star to companion Parameters ---------- star_name: str Stellar idenification number. eg. HD30501 companion_mass: float Mass of companion in Jupiter masses stellar_age: float Stellar Age. (Closest model is used) Stellar Age. (Closest model is used). band: list of str Spectral bands to obtain ratio. model: str (optional) Year of Barraffe model to use [2003 (default), 2015]. area_ratio: bool default=False Perform simple radius and area comparions calculations. """ if "All" in band: band = ["J", "H", "K"] # Obtain Stellar parameters from astroquery star_params = get_stellar_params(star_name) # returns a astroquesry result table # Get parameters for this mass and age companion_params = mass_table_search(companion_mass, stellar_age, model=model) Flux_ratios = calculate_flux_ratios(star_params, companion_params) # Print flux ratios using a generator print("\nFlux ratios:") [print(("{0!s} band star/companion Flux ratio = {1:4.2f}," " >>> companion/star Flux ratio ={2:0.4f}").format(key, val[0], 1. 
/ val[0])) for key, val in Flux_ratios.items() if key in band] if area_ratio: # Compare to area ratio Rstar = calculate_stellar_radius(star_name, star_params) print(Rstar) Rcomp_Rstar = companion_params["R"] / Rstar print("\nRadius Calculation") print("Host radius = {} R_sun".format(Rstar[0])) print("companion radius = {} R_sun".format(np.round(companion["R"], 4))) print("Radius Ratio of companion/star = {} ".format(Rcomp_Rstar[0])) print("Area Ratio of companion/star = {} ".format(Rcomp_Rstar[0]**2)) return 0 if __name__ == '__main__': args = vars(_parser()) star_name = args.pop('star_name') companion_mass = args.pop('companion_mass') age = args.pop('age') opts = {k: args[k] for k in args} sys.exit(main(star_name, companion_mass, age, **opts))
Python
0.998523
@@ -3533,27 +3533,16 @@ _radius( -star_name, star_par @@ -3795,16 +3795,23 @@ ompanion +_params %5B%22R%22%5D, 4 @@ -3859,33 +3859,32 @@ ion/star = %7B%7D - %22.format(Rcomp_R @@ -3946,17 +3946,16 @@ = %7B%7D - %22.format
ee42b37a7dff1e111d7b4df71ece818e7c2f2d38
set Keen.io settings
buildtimetrend/settings.py
buildtimetrend/settings.py
# vim: set expandtab sw=4 ts=4: # pylint: disable=invalid-name,too-few-public-methods ''' Manages settings of buildtime trend Copyright (C) 2014 Dieter Adriaenssens <[email protected]> This file is part of buildtime-trend <https://github.com/ruleant/buildtime-trend/> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' import os import yaml import buildtimetrend from buildtimetrend.collection import Collection from buildtimetrend.tools import check_file class Settings(object): ''' Settings class is a singleton Inspired by http://python-3-patterns-idioms-test.readthedocs.org/en/latest/Singleton.html ''' class __Settings(object): ''' Settings class contains settings and config options ''' def __init__(self): ''' Initialise class ''' self.settings = Collection() # set project name project_name = buildtimetrend.NAME # use Travis repo slug as project name if 'TRAVIS_REPO_SLUG' in os.environ: project_name = os.getenv('TRAVIS_REPO_SLUG') self.set_project_name(project_name) def set_project_name(self, name): ''' Set project name Parameters : - name : project name ''' self.add_setting("project_name", name) def get_project_name(self): ''' Get project name ''' return self.get_setting("project_name") def add_setting(self, name, value): ''' Add a setting Parameters : - name : Setting name - value : Setting value ''' self.settings.add_item(name, value) def get_setting(self, name): ''' Get a setting value Parameters : - name : Setting name ''' return self.settings.get_item(name) def load_config_file(self, config_file): ''' Load settings from a config file Parameters : - config_file : name of the config file ''' if not check_file(config_file): return False with open(config_file, 'rb') as file_stream: config = yaml.load(file_stream) self.settings.add_items(config["buildtimetrend"]) return True def get_project_info(self): ''' Get project info as a dictonary ''' return { "version": buildtimetrend.VERSION, "schema_version": buildtimetrend.SCHEMA_VERSION, "project_name": str(self.get_project_name()) } instance = None def __new__(cls): # __new__ always a classmethod ''' Create a singleton ''' if not Settings.instance: Settings.instance = Settings.__Settings() return Settings.instance def __getattr__(self, name): ''' Redirect access to get singleton properties ''' return getattr(self.instance, name) def __setattr__(self, name): ''' Redirect access to set singleton properties ''' return setattr(self.instance, name)
Python
0
@@ -896,16 +896,28 @@ rt yaml%0A +import keen%0A import b @@ -2998,16 +2998,462 @@ trend%22%5D) +%0A%0A # set Keen.io settings%0A if %22keen%22 in config:%0A if %22project_id%22 in config%5B%22keen%22%5D:%0A keen.project_id = config%5B%22keen%22%5D%5B%22project_id%22%5D%0A if %22write_key%22 in config%5B%22keen%22%5D:%0A keen.write_key = config%5B%22keen%22%5D%5B%22write_key%22%5D%0A if %22read_key%22 in config%5B%22keen%22%5D:%0A keen.read_key = config%5B%22keen%22%5D%5B%22read_key%22%5D %0A
19672bf43c721ee66c1f8c7f3d65cfbeae564cad
fix typo in perfmon parameter
yape/main.py
yape/main.py
# os methods for manipulating paths import os import argparse import sys import bokeh import csv import sqlite3 from yape.parsepbuttons import parsepbuttons from yape.plotpbuttons import mgstat,vmstat,iostat,perfmon,sard def fileout(db,filename,fileprefix,section): c = db.cursor() c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", [section]) if len(c.fetchall()) == 0: return None file=os.path.join(filename,fileprefix+section+".csv") print("exporting "+section+" to "+file) c.execute("select * from \""+section+"\"") columns = [i[0] for i in c.description] with open(file, "w") as f: csvWriter = csv.writer(f) csvWriter.writerow(columns) csvWriter.writerows(c) def ensure_dir(file_path): directory = os.path.dirname(file_path) if not os.path.exists(directory): os.makedirs(directory) def fileout_splitcols(db,filename,fileprefix,section,split_on): c = db.cursor() c.execute("SELECT name FROM sqlite_master WHERE type='table' AND name=?", [section]) if len(c.fetchall()) == 0: return None c.execute("select distinct "+split_on+" from \""+section+"\"") rows=c.fetchall() for column in rows: c.execute("select * from \""+section+"\" where "+split_on+"=?",[column[0]]) file=os.path.join(filename,fileprefix+section+"."+column[0]+".csv") print("exporting "+section+"-"+column[0]+" to "+file) columns = [i[0] for i in c.description] with open(file, "w") as f: csvWriter = csv.writer(f) csvWriter.writerow(columns) csvWriter.writerows(c) def yape2(): parser = argparse.ArgumentParser(description='Yape 2.0') parser.add_argument("pButtons_file_name", help="path to pButtons file to use") parser.add_argument("--filedb",help="use specific file as DB, useful to be able to used afterwards or as standalone datasource.") parser.add_argument("--skip-parse",dest="skipparse",help="disable parsing; requires filedb to be specified to supply data",action="store_true") parser.add_argument("-c",dest='csv',help="will output the parsed tables as csv files. useful for further processing. will currently create: mgstat, vmstat, sar-u. sar-d and iostat will be output per device",action="store_true") parser.add_argument("--mgstat",dest='graphmgstat',help="plot mgstat data",action="store_true") parser.add_argument("--vmstat",dest='graphvmstat',help="plot vmstat data",action="store_true") parser.add_argument("--iostat",dest='graphiostat',help="plot iostat data",action="store_true") parser.add_argument("--sard",dest='graphsard',help="plot sar-d data",action="store_true") parser.add_argument("--permon",dest='graphperfmon',help="plot perfmon data",action="store_true") parser.add_argument("--timeframe",dest='timeframe',help="specify a timeframe for the plots, i.e. 
--timeframe \"2018-05-16 00:01:16,2018-05-16 17:04:15\"") parser.add_argument("--prefix",dest='prefix',help="specify output file prfeix") parser.add_argument("--plotDisks",dest='plotDisks',help="restrict list of disks to plot") parser.add_argument("-a","--all",dest='all',help="graph everything",action="store_true") parser.add_argument("-o","--out",dest='out',help="specify base output directory, defaulting to <pbuttons_name>/") args = parser.parse_args() try: if args.skipparse: if args.filedb is None: print("filedb required with skip-parse set") return -1 if args.filedb is not None: db=sqlite3.connect(args.filedb) else: db=sqlite3.connect(":memory:") db.execute('pragma journal_mode=wal') db.execute('pragma synchronous=0') if not args.skipparse: parsepbuttons(args.pButtons_file_name,db) if args.out is not None: basefilename=args.out else: basefilename=args.pButtons_file_name.split(".")[0] if args.prefix is not None: fileprefix=args.prefix else: fileprefix="" if args.plotDisks is not None: plotDisks=args.plotDisks else: plotDisks="" if args.timeframe is not None: TIMEFRAMEMODE=True print("timeframe on "+args.timeframe) else: TIMEFRAMEMODE=False if args.csv: ensure_dir(basefilename+os.sep) fileout(db,basefilename,fileprefix,"mgstat") fileout(db,basefilename,fileprefix,"vmstat") fileout_splitcols(db,basefilename,fileprefix,"iostat","Device") fileout_splitcols(db,basefilename,fileprefix,"sar-d","DEV") fileout(db,basefilename,fileprefix,"perfmon") fileout(db,basefilename,fileprefix,"sar-u") if args.graphsard or args.all: ensure_dir(basefilename+os.sep) sard(db,basefilename,fileprefix,plotDisks,args.timeframe) if args.graphmgstat or args.all: ensure_dir(basefilename+os.sep) mgstat(db,basefilename,fileprefix,args.timeframe) if args.graphvmstat or args.all: ensure_dir(basefilename+os.sep) vmstat(db,basefilename,fileprefix,args.timeframe) if args.graphiostat or args.all: ensure_dir(basefilename+os.sep) iostat(db,basefilename,fileprefix,plotDisks,args.timeframe) if args.graphperfmon or args.all: ensure_dir(basefilename+os.sep) perfmon(db,basefilename,fileprefix,args.timeframe) except OSError as e: print('Could not process pButtons file because: {}'.format(str(e)))
Python
0.000026
@@ -2738,16 +2738,17 @@ t(%22--per +f mon%22,des
12efb71143a18e191e05a1b5f0e6d7c59854e0ba
fix brampton scraper class name
ca_on_brampton/__init__.py
ca_on_brampton/__init__.py
# coding: utf-8
from utils import CanadianJurisdiction


class London(CanadianJurisdiction):
    jurisdiction_id = u'ocd-jurisdiction/country:ca/csd:3521010/council'
    geographic_code = 3521010
    division_name = u'Brampton'
    name = u'Brampton City Council'
    url = 'http://www.brampton.ca'
Python
0.000004
@@ -60,12 +60,14 @@ ass -Lond +Brampt on(C
a4fbc3372a446861f086d847186726b80443f212
add utils for printing results; add ndiff table
causalinference/results.py
causalinference/results.py
import numpy as np
from scipy.stats import norm


class Results(object):

	def __init__(self, causal):

		self.causal = causal


	def ndiff(self):

		print self.causal.ndiff


	def propensity(self):

		if not hasattr(self.causal, 'pscore'):
			self.causal.propensity()

		print 'Coefficients:', self.causal.pscore['coeff']
		print 'Log-likelihood:', self.causal.pscore['loglike']


	def summary(self):

		header = ('%8s'+'%12s'*4+'%24s') % ('', 'coef', 'std err', 'z', 'P>|z|', '[95% Conf. Int.]')
		print header
		print '-' * len(header)

		tuples = (('ATE', self.causal.ate, self.causal.ate_se),
		          ('ATT', self.causal.att, self.causal.att_se),
		          ('ATC', self.causal.atc, self.causal.atc_se))

		for (name, coef, se) in tuples:
			t = coef / se
			p = 1 - norm.cdf(np.abs(t))
			lw = coef - 1.96*se
			up = coef + 1.96*se
			print ('%8s'+'%12.3f'*6) % (name, coef, se, t, p, lw, up)
Python
0
@@ -124,36 +124,711 @@ sal%0A -%0A%0A%09def ndiff(self):%0A%0A%09%09print +%09%09self.table_width = 80%0A%0A%0A%09def _varnames(self, varnums):%0A%0A%09%09return %5B'X'+str(varnum+1) for varnum in varnums%5D%0A%0A%0A%09def _make_row(self, entries):%0A%0A%09%09col_width = self.table_width // len(entries)%0A%09%09first_col_width = col_width + self.table_width %25 len(entries)%0A%0A%09%09return ('%25'+str(first_col_width)+'s' + ('%25'+str(col_width)+'.3f')*(len(entries)-1)) %25 entries%0A%0A%0A%09def ndiff(self):%0A%0A%09%09varnames = self._varnames(xrange(self.causal.K))%0A%09%09X_t_mean = self.causal.X_t.mean(0)%0A%09%09X_t_sd = np.sqrt(self.causal.X_t.var(0))%0A%09%09X_c_mean = self.causal.X_c.mean(0)%0A%09%09X_c_sd = np.sqrt(self.causal.X_c.var(0))%0A%0A%09%09for i in xrange(self.causal.K):%0A%09%09%09print self._make_row((varnames%5Bi%5D, X_t_mean%5Bi%5D, X_t_sd%5Bi%5D, X_c_mean%5Bi%5D, X_c_sd%5Bi%5D, sel @@ -841,16 +841,21 @@ al.ndiff +%5Bi%5D)) %0A%0A%0A%09def @@ -1118,20 +1118,19 @@ %25 ('', ' -coef +est ', 'std @@ -1516,29 +1516,23 @@ int -('%258s'+'%2512.3f'*6) %25 +self._make_row( (nam @@ -1557,10 +1557,11 @@ lw, up) +) %0A%0A
d0ce2b074ffd603c507069d8a5ab1189fad0ca56
Update a version number from trunk r9016
pywikibot/families/wikia_family.py
pywikibot/families/wikia_family.py
# -*- coding: utf-8 -*-
__version__ = '$Id$'

import family

# The Wikia Search family
# user-config.py: usernames['wikia']['wikia'] = 'User name'

class Family(family.Family):
    def __init__(self):
        family.Family.__init__(self)

        self.name = u'wikia'

        self.langs = {
            u'wikia': None,
        }

    def hostname(self, code):
        return u'www.wikia.com'

    def version(self, code):
        return "1.15.1"

    def scriptpath(self, code):
        return ''

    def apipath(self, code):
        return '/api.php'
Python
0
@@ -444,11 +444,11 @@ %221.1 -5.1 +6.2 %22%0A%0A
9161e2dfe0edd27004ccd964a39c092275e9e5ab
Add derivation outlines
eqs_backend/eqs_backend.py
eqs_backend/eqs_backend.py
# Copyright (c) 2016, Herman Bergwerf. All rights reserved. # Use of this source code is governed by an AGPL-3.0-style license # that can be found in the LICENSE file. from flask import Flask, request from neo4j.v1 import GraphDatabase, basic_auth from .helpers import * # Define Flask server instance. server = Flask(__name__) driver = GraphDatabase.driver( 'bolt://0.0.0.0', auth=basic_auth( 'neo4j', 'test')) def setupDb(): """ Setup empty database. """ db.run('CREATE (:ContextRoot)') db.run('CREATE CONSTRAINT ON (node:Context) ASSERT node.label IS UNIQUE') db.run('CREATE CONSTRAINT ON (node:Variable) ASSERT node.label IS UNIQUE') def openDb(): """ Open Neo4j session. """ return driver.session() @server.route('/equation/', methods=['GET']) def listEquations(): """ REST interface for retrieving equations. """ return '{}' @server.route('/search/') def textSearch(): """ Fulltext search interface to search for: - contexts - equation labels - variables and aliases """ return '{}' @server.route('/derive/', methods=['POST']) def appendDerivation(): """ Append derivation to exiting equation. """ return '{}' @server.route('/variable/', methods=['POST']) def addVariable(): """ Add new variable within the given context. """ data = request.get_json() if isDictAndContains(data, ['label', 'latex', 'parent', 'expr']): db = openDb() # Run query. db.run(''' MATCH (parent:Context {{label:'{}'}}) CREATE (node:Variable {{label:'{}', latex:'{}', expr:'{}'}}) CREATE (node)-[:BelongsTo]->(parent) '''.format(data['parent'], data['label'], data['latex'], data['expr'])) db.close() return dumpMessage('processed') else: return dumpMessage('failed', 'Incomplete data.') @server.route('/context/', methods=['POST']) def appendContext(): """ Append context to the given parent context. If no parent is defined the context is appended to the root context. """ data = request.get_json() if isDictAndContains(data, ['label']): db = openDb() # Find parent query. parent = "Context {label:'{}'}".format(data[ 'parent']) if 'parent' in data else 'ContextRoot' # Run query. db.run(''' MATCH (parent:{}) CREATE (node:Context {{label:'{}'}}) CREATE (node)-[:BelongsTo]->(parent) '''.format(parent, data['label'])) db.close() return dumpMessage('processed') else: return dumpMessage('failed', 'No context label provided.')
Python
0.000005
@@ -266,16 +266,170 @@ port *%0A%0A +%0A# TODO: consider using http://flask-restful.readthedocs.io/en/latest/%0A# http://blog.miguelgrinberg.com/post/designing-a-restful-api-using-flask-restful%0A%0A # Define @@ -1281,17 +1281,21 @@ ('/deriv -e +ation /', meth @@ -1348,71 +1348,1211 @@ -Append derivation to exiting equation.%0A %22%22%22%0A%0A return '%7B%7D' +# Add derivation%0A%0A A derivation has the following structure:%0A - One source relation: the derivation loads an external equation as base,%0A the source can be either a variable defenition or another derivation.%0A - A number of substitutions: in the derivation other equations or variable%0A definitions can be used for substitution.%0A - Rewritten expression: the expression that is equal to the source equation%0A after all substitutions are applied.%0A%0A A derivation does not neccesarily have to substitute other equations. It can%0A simply be a rewritten form of the source equation. Note that SymPy can%0A assist in creating derivations. The main point is providing a more flexible%0A environment for adding custom derivations, and controlling which steps are%0A shown to the user.%0A %22%22%22%0A%0A data = request.get_json()%0A if isDictAndContains(data, %5B'source', 'subs', 'expr'%5D):%0A db = openDb()%0A%0A # Retrieve source equation.%0A%0A # Execute substitutions.%0A%0A # Check output expression.%0A%0A # Write expression to database.%0A%0A db.close()%0A%0A return dumpMessage('processed')%0A else:%0A return dumpMessage('failed', 'Incomplete data.') %0A%0A%0A@
27049d58b322bb50554198ecc64eab7731b86149
add support for group metadata
zarr/meta.py
zarr/meta.py
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, division


import json
import numpy as np


from zarr.compat import PY2, text_type
from zarr.errors import MetadataError


def decode_metadata(b):
    s = text_type(b, 'ascii')
    meta = json.loads(s)
    zarr_format = meta.get('zarr_format', None)
    if zarr_format != 1:
        raise MetadataError('unsupported zarr format: %s' % zarr_format)
    try:
        meta = dict(
            zarr_format=meta['zarr_format'],
            shape=tuple(meta['shape']),
            chunks=tuple(meta['chunks']),
            dtype=decode_dtype(meta['dtype']),
            compression=meta['compression'],
            compression_opts=meta['compression_opts'],
            fill_value=meta['fill_value'],
            order=meta['order'],
        )
    except Exception as e:
        raise MetadataError('error decoding metadata: %s' % e)
    else:
        return meta


def encode_metadata(meta):
    meta = dict(
        zarr_format=1,
        shape=meta['shape'],
        chunks=meta['chunks'],
        dtype=encode_dtype(meta['dtype']),
        compression=meta['compression'],
        compression_opts=meta['compression_opts'],
        fill_value=meta['fill_value'],
        order=meta['order'],
    )
    s = json.dumps(meta, indent=4, sort_keys=True, ensure_ascii=True)
    b = s.encode('ascii')
    return b


def encode_dtype(d):
    if d.fields is None:
        return d.str
    else:
        return d.descr


def _decode_dtype_descr(d):
    # need to convert list of lists to list of tuples
    if isinstance(d, list):
        # recurse to handle nested structures
        if PY2:  # pragma: no cover
            # under PY2 numpy rejects unicode field names
            d = [(f.encode('ascii'), _decode_dtype_descr(v)) for f, v in d]
        else:
            d = [(f, _decode_dtype_descr(v)) for f, v in d]
    return d


def decode_dtype(d):
    d = _decode_dtype_descr(d)
    return np.dtype(d)
Python
0
@@ -192,24 +192,42 @@ dataError%0A%0A%0A +ZARR_FORMAT = 2%0A%0A%0A def decode_m @@ -221,24 +221,30 @@ %0Adef decode_ +array_ metadata(b): @@ -369,17 +369,27 @@ rmat != -1 +ZARR_FORMAT :%0A @@ -978,16 +978,22 @@ encode_ +array_ metadata @@ -1041,9 +1041,19 @@ mat= -1 +ZARR_FORMAT ,%0A @@ -2035,12 +2035,665 @@ np.dtype(d)%0A +%0A%0Adef decode_group_metadata(b):%0A s = text_type(b, 'ascii')%0A meta = json.loads(s)%0A zarr_format = meta.get('zarr_format', None)%0A if zarr_format != ZARR_FORMAT:%0A raise MetadataError('unsupported zarr format: %25s' %25 zarr_format)%0A try:%0A meta = dict(%0A zarr_format=meta%5B'zarr_format'%5D,%0A )%0A except Exception as e:%0A raise MetadataError('error decoding metadata: %25s' %25 e)%0A else:%0A return meta%0A%0A%0Adef encode_group_metadata(meta=None):%0A meta = dict(%0A zarr_format=ZARR_FORMAT,%0A )%0A s = json.dumps(meta, indent=4, sort_keys=True, ensure_ascii=True)%0A b = s.encode('ascii')%0A return b%0A
1b2f9e8cff542868765f61d1af0eca004c1de791
support skipping rows in the base processor
datapackage_pipelines_mojp/common/processors/base_processors.py
datapackage_pipelines_mojp/common/processors/base_processors.py
from itertools import chain from datapackage_pipelines.wrapper import ingest, spew from datapackage_pipelines_mojp import settings as mojp_settings class BaseProcessor(object): """ all mojp processor should extend this class it is pluggable into our unit tests to allow mocks and automated tests of processors """ def __init__(self, parameters, datapackage, resources, settings=None): self._parameters = parameters self._datapackage = datapackage self._resources = resources self._settings = mojp_settings if not settings else settings @classmethod def main(cls): # can be used like this in datapackage processor files: # if __main__ == '__main__': # Processor.main() spew(*cls(*ingest()).spew()) def spew(self): self._datapackage, self._resources = self._process(self._datapackage, self._resources) return self._datapackage, self._resources def _process(self, datapackage, resources): return datapackage, resources def _get_settings(self, key=None, default=None): if key: ret = getattr(self._settings, key, default) if default is None and ret is None: raise Exception("unknown key: {}".format(key)) else: return ret else: return self._settings class AddResourcesProcessor(BaseProcessor): def _get_resource_descriptors(self): return [] def _get_resources_iterator(self): return () def _process(self, datapackage, resources): datapackage["resources"] += self._get_resource_descriptors() resources = chain(resources, self._get_resources_iterator()) return super(AddResourcesProcessor, self)._process(datapackage, resources) class FilterResourcesProcessor(BaseProcessor): def _filter_datapackage(self, datapackage): datapackage["resources"] = self._filter_resource_descriptors(datapackage["resources"]) return datapackage def _filter_resource_descriptors(self, descriptors): return [self._filter_resource_descriptor(descriptor) for descriptor in descriptors] def _filter_resource_descriptor(self, descriptor): return descriptor def _filter_resources(self, resources, datapackage): for i, resource in enumerate(resources): resource_descriptor = datapackage["resources"][i] yield self._filter_resource(resource, resource_descriptor) def _filter_resource(self, resource, descriptor): for row in resource: yield self._filter_row(row, descriptor) def _filter_row(self, row, resource_descriptor): return row def _process(self, datapackage, resources): datapackage = self._filter_datapackage(datapackage) resources = self._filter_resources(resources, datapackage) return super(FilterResourcesProcessor, self)._process(datapackage, resources) class BaseDownloadProcessor(AddResourcesProcessor): def _get_resource_descriptors(self): return [{"name": self._get_source_name(), "path": "{}.csv".format(self._get_source_name()), "schema": self._get_schema()}] def _get_resources_iterator(self): if self._parameters.get("mock"): return [self._mock_download()] else: return [self._download()] def _get_schema(self): raise NotImplementedError() def _download(self): raise NotImplementedError() def _mock_download(self): raise NotImplementedError() def _get_source_name(self): raise NotImplementedError()
Python
0
@@ -2588,37 +2588,46 @@ ce:%0A -yield +filtered_row = self._filter_ro @@ -2636,32 +2636,108 @@ row, descriptor) +%0A if filtered_row is not None:%0A yield filtered_row %0A%0A def _filte
69642fbfa143d475b3dcc548bffbda8a6dd6c680
Enable template caching in production
rotd/settings/production.py
rotd/settings/production.py
# -*- coding: utf-8 -*-
from .base import *
from .util import get_env_setting

DEBUG = False

DOMAIN = get_env_setting('ROTD_DOMAIN')

ALLOWED_HOSTS = [
    DOMAIN,
]

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.postgresql_psycopg2",
        "NAME": get_env_setting('ROTD_DB_NAME'),
        "USER": get_env_setting('ROTD_DB_USER'),
        "PASSWORD": get_env_setting('ROTD_DB_PASSWORD'),
        "HOST": "localhost",
        "PORT": "",
    },
}

SECRET_KEY = get_env_setting('ROTD_SECRET_KEY')

EMAIL_BACKEND = 'django.core.mail.backends.smtp.EmailBackend'
EMAIL_HOST = get_env_setting('ROTD_EMAIL_HOST')
EMAIL_HOST_PASSWORD = get_env_setting('ROTD_EMAIL_HOST_PASSWORD')
EMAIL_HOST_USER = get_env_setting('ROTD_EMAIL_HOST_USER')
EMAIL_PORT = get_env_setting('ROTD_EMAIL_PORT')
EMAIL_USE_TLS = True
Python
0
@@ -824,8 +824,774 @@ = True%0A +%0ATEMPLATES = %5B%0A %7B%0A 'BACKEND': 'django.template.backends.django.DjangoTemplates',%0A 'DIRS': %5Bos.path.abspath(os.path.join(BASE_DIR, 'templates'))%5D,%0A 'OPTIONS': %7B%0A 'context_processors': %5B%0A 'django.template.context_processors.debug',%0A 'django.template.context_processors.request',%0A 'django.contrib.auth.context_processors.auth',%0A 'django.contrib.messages.context_processors.messages',%0A %5D,%0A 'loaders': %5B%0A ('django.template.loaders.cached.Loader', %5B%0A 'django.template.loaders.filesystem.Loader',%0A 'django.template.loaders.app_directories.Loader',%0A %5D),%0A %5D,%0A %7D,%0A %7D,%0A%5D%0A
faaf1d64fc8c5b15c346f70288235426f0647757
use /usr/bin/env python to run the script
FulltoSNP.py
FulltoSNP.py
#!/usr/bin/env python2.6

import sys
import re
import itertools
import math
from Bio import SeqIO

#SNP alignment from full Alignment nexus file

#Check for correct commandline arguments
if len(sys.argv) != 4:
    print("Usage: FulltoSNP.py <nexus file> <output file> <threshold>")
    sys.exit(0)

#Get filenames
InFileName = sys.argv[1]
OutFileName = sys.argv[2]
threshold = sys.argv[3]
PosOutFileName = sys.argv[2]+'positions'

record_dict = SeqIO.to_dict(SeqIO.parse(InFileName,"nexus"))

#seperate speciesnames from sequences
seqs = []
titles = []
for key in record_dict:
    titles.append(key)
    x = record_dict[key]
    seqs.append(x.seq)

#transpose string lists
thresh = math.ceil(float(threshold) * len(seqs))
print(thresh)
seqsTran = zip(*seqs)
snps = []

#for every tuple check if value is the same, if so remove tuple
pos = 1
positions=[]
for s in seqsTran[:]:
    if len(set(s))!=1 and s.count('-')<= thresh:
        snps.append(s)
        positions.append(pos)
    pos=pos+1

print(len(positions))
seqsTran = []
results = zip(*snps)
for i in range(len(results)):
    results[i] = ''.join(results[i])

SeqDict={}
print(len(results[0]))
for i in range(len(results)):
    SeqDict[titles[i]]=results[i]

OutFile = open(OutFileName,'w')

#write file header
OutFile.write("#NEXUS" + "\n" + "Begin DATA;" + "\n\t" + "Dimensions ntax=" + str(len(SeqDict)) + " nchar=" + str(len(results[0])) + ";" + "\n\t" + "Format datatype=DNA gap=-;" + "\n\t" + "Matrix" + "\n")

#write all of the SNPs into the new file
for key in SeqDict:
    newSeq = "".join(SeqDict[key])
    OutFile.write(key + "\n" + newSeq + "\n")

OutFile.write(";" + "\n" + "END;")
OutFile.close()

OutFile2 = open(PosOutFileName,'w')
for i in positions:
    OutFile2.write(str(i)+'\n')
OutFile2.close()
Python
0.000001
@@ -18,11 +18,8 @@ thon -2.6 %0A%0Aim
d01bb6e89c6fcfe8a17d90f3ace175ad26f921b5
Support CSV files beginning with a byte order mark
git-keeper-core/gkeepcore/local_csv_files.py
git-keeper-core/gkeepcore/local_csv_files.py
# Copyright 2016 Nathan Sommer and Ben Coleman # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. """ Provides concrete classes for reading and writing local CSV files as well as a function for getting rows from a local CSV file. """ import csv from gkeepcore.csv_files import CSVReader, CSVWriter, CSVError def csv_rows(file_path: str) -> list: """ Retrieve rows from a local CSV file. :param file_path: path to the file :return: rows as a list of lists """ try: with open(file_path) as f: rows = list(csv.reader(f)) except csv.Error: raise CSVError('Error reading from {0}'.format(file_path)) return rows class LocalCSVReader(CSVReader): """Allows reading from a local CSV file.""" def __init__(self, file_path): """ :param file_path: path to the CSV file to read """ try: with open(file_path) as f: self._rows = list(csv.reader(f)) except (csv.Error, OSError): raise CSVError('Error reading from {0}'.format(file_path)) def get_rows(self) -> list: """ Retrieve the rows from the CSV file :return: list of lists representing all rows from the file """ return self._rows class LocalCSVWriter(CSVWriter): """Allows writing to a local CSV file.""" def __init__(self, file_path): """ :param file_path: path to the CSV file to write """ self._file_path = file_path def write_rows(self, rows): """ Write rows to the file :param rows: list of lists (or tuples) to write """ try: with open(self._file_path, 'w') as f: writer = csv.writer(f) for row in rows: writer.writerow(row) except OSError as e: raise CSVError('Error writing to {0}' .format(self._file_path))
Python
0
@@ -1488,32 +1488,54 @@ h open(file_path +, encoding='utf-8-sig' ) as f:%0A
1657e46cd5c2a81df4cbb73b292b0bf9072d5c51
Fix test: make sure that Isolation Forest actually make a categorical split
h2o-py/tests/testdir_tree/pyunit_tree_irf.py
h2o-py/tests/testdir_tree/pyunit_tree_irf.py
import h2o
from h2o.tree import H2OTree
from h2o.estimators import H2OIsolationForestEstimator
from tests import pyunit_utils


def check_tree(tree, tree_number, tree_class = None):
    assert tree is not None
    assert len(tree) > 0
    assert tree._tree_number == tree_number
    assert tree._tree_class == tree_class
    assert tree.root_node is not None
    assert tree.left_children is not None
    assert tree.right_children is not None
    assert tree.thresholds is not None
    assert tree.nas is not None
    assert tree.descriptions is not None
    assert tree.node_ids is not None
    assert tree.model_id is not None
    assert tree.levels is not None
    assert tree.root_node.na_direction is not None
    assert tree.root_node.id is not None


def irf_tree_Test():

    prostate = h2o.import_file(path=pyunit_utils.locate("smalldata/prostate/prostate.csv"))
    prostate["RACE"] = prostate["RACE"].asfactor()

    iso_model = H2OIsolationForestEstimator()
    iso_model.train(training_frame = prostate, x = list(set(prostate.col_names) - set(["ID", "CAPSULE"])))

    tree = H2OTree(iso_model, 5)
    check_tree(tree, 5, None)
    print(tree)


if __name__ == "__main__":
    pyunit_utils.standalone_test(irf_tree_Test)
else:
    irf_tree_Test()
Python
0.000076
@@ -779,151 +779,181 @@ ():%0A -%0A -prostate = h2o.import_file(path=pyunit_utils.locate(%22smalldata/prostate/prostate.csv%22))%0A prostate%5B%22RACE%22%5D = prostate%5B%22RACE%22%5D.asfactor() +cat_frame = h2o.create_frame(cols=10, categorical_fraction=1, seed=42)%0A # check all columns are categorical%0A assert set(cat_frame.types.values()) == set(%5B'enum'%5D)%0A %0A @@ -993,16 +993,23 @@ timator( +seed=42 )%0A is @@ -1040,81 +1040,19 @@ rame - = prostate, x = list(set(prostate.col_names) - set(%5B%22ID%22, %22CAPSULE%22%5D)))%0A +=cat_frame) %0A%0A
0500105b9dc148855b7957963b3949d89a7cc3b4
Remove routes for PayPal
gratipay/models/exchange_route.py
gratipay/models/exchange_route.py
from __future__ import absolute_import, division, print_function, unicode_literals import balanced import braintree from postgres.orm import Model class ExchangeRoute(Model): typname = "exchange_routes" def __bool__(self): return self.error != 'invalidated' __nonzero__ = __bool__ @classmethod def from_id(cls, id): return cls.db.one(""" SELECT r.*::exchange_routes FROM exchange_routes r WHERE id = %(id)s """, locals()) @classmethod def from_network(cls, participant, network): participant_id = participant.id r = cls.db.one(""" SELECT r.*::exchange_routes FROM current_exchange_routes r WHERE participant = %(participant_id)s AND network = %(network)s """, locals()) if r: r.__dict__['participant'] = participant return r @classmethod def from_address(cls, participant, network, address): participant_id = participant.id r = cls.db.one(""" SELECT r.*::exchange_routes FROM exchange_routes r WHERE participant = %(participant_id)s AND network = %(network)s AND address = %(address)s """, locals()) if r: r.__dict__['participant'] = participant return r @classmethod def associate_balanced(cls, participant, balanced_account, network, address): if network == 'balanced-cc': obj = balanced.Card.fetch(address) else: assert network == 'balanced-ba', network # sanity check obj = balanced.BankAccount.fetch(address) obj.associate_to_customer(balanced_account) return cls.insert(participant, network, address) @classmethod def insert(cls, participant, network, address, error='', fee_cap=None): participant_id = participant.id r = cls.db.one(""" INSERT INTO exchange_routes (participant, network, address, error, fee_cap) VALUES (%(participant_id)s, %(network)s, %(address)s, %(error)s, %(fee_cap)s) RETURNING exchange_routes.*::exchange_routes """, locals()) if network == 'balanced-cc': participant.update_giving_and_tippees() r.__dict__['participant'] = participant return r def invalidate(self): if self.network == 'balanced-ba': balanced.BankAccount.fetch(self.address).delete() elif self.network == 'balanced-cc': balanced.Card.fetch(self.address).unstore() elif self.network == 'braintree-cc': braintree.PaymentMethod.delete(self.address) self.update_error('invalidated') def update_error(self, new_error, propagate=True): id = self.id old_error = self.error if old_error == 'invalidated': return self.db.run(""" UPDATE exchange_routes SET error = %(new_error)s WHERE id = %(id)s """, locals()) self.set_attributes(error=new_error) # Update the receiving amounts of tippees if requested and necessary if not propagate or self.network != 'balanced-cc': return if self.participant.is_suspicious or bool(new_error) == bool(old_error): return self.participant.update_giving_and_tippees()
Python
0.000001
@@ -2747,24 +2747,291 @@ f.address)%0A%0A + # For Paypal, we remove the record entirely to prevent%0A # an integrity error if the user tries to add the route again%0A if self.network == 'paypal':%0A self.db.run(%22DELETE FROM exchange_routes WHERE id=%25s%22, (self.id,))%0A else:%0A self
7e5477682dfc0d907fe55a489c75179a6e4c832b
fix Swale import script
polling_stations/apps/data_collection/management/commands/import_swale.py
polling_stations/apps/data_collection/management/commands/import_swale.py
from data_collection.management.commands import BaseShpStationsShpDistrictsImporter


class Command(BaseShpStationsShpDistrictsImporter):
    srid = 27700
    council_id = 'E07000113'
    districts_name = 'shp/Swale Polling Districts'
    stations_name = 'shp/Swale Polling Stations.shp'
    #elections = ['local.kent.2017-05-04']
    elections = []

    def district_record_to_dict(self, record):
        code = str(record[0]).strip()
        return {
            'internal_council_id': code,
            'name': str(record[1]).strip(),
            'polling_station_id': code,
        }

    def station_record_to_dict(self, record):
        return {
            'internal_council_id': str(record[0]).strip(),
            'postcode': '',
            'address': str(record[4]).strip(),
        }
Python
0.000002
@@ -41,27 +41,27 @@ import Base -Shp +Csv StationsShpD @@ -96,19 +96,19 @@ and(Base -Shp +Csv Stations @@ -252,45 +252,66 @@ = ' -shp/ Swale -Polling Stations.shp +21 Feb 2017 Polling scheme station numbers.csv '%0A -# elec @@ -347,27 +347,8 @@ 04'%5D -%0A elections = %5B%5D %0A%0A @@ -536,48 +536,8 @@ (),%0A - 'polling_station_id': code,%0A @@ -589,34 +589,140 @@ ecord):%0A -%0A + - return %7B%0A +codes = record.pd.split(%22 and %22)%0A stations = %5B%5D%0A for code in codes:%0A stations.append(%7B%0A @@ -748,38 +748,20 @@ il_id': -str(record%5B0%5D).strip() +code ,%0A @@ -766,16 +766,20 @@ + + 'postcod @@ -798,16 +798,20 @@ + 'address @@ -817,38 +817,139 @@ s': -str( record -%5B4%5D).strip(),%0A %7D +.premises,%0A 'polling_district_id': code,%0A 'location': None,%0A %7D)%0A return stations %0A
960688f925cba5c82e1c7c0b01bc032957023118
Fix flake8 violations
f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py
f5_openstack_agent/lbaasv2/drivers/bigip/tenants.py
"""Tenants Manager.""" # Copyright 2014 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from oslo_log import log as logging from f5_openstack_agent.lbaasv2.drivers.bigip import exceptions as f5ex from f5_openstack_agent.lbaasv2.drivers.bigip.network_helper import \ NetworkHelper from f5_openstack_agent.lbaasv2.drivers.bigip.system_helper import SystemHelper LOG = logging.getLogger(__name__) class BigipTenantManager(object): """Create network connectivity for a bigip.""" def __init__(self, conf, driver): """Create a BigipTenantManager.""" self.conf = conf self.driver = driver self.system_helper = SystemHelper() self.network_helper = NetworkHelper() self.service_adapter = self.driver.service_adapter def assure_tenant_created(self, service): """Create tenant partition.""" tenant_id = service['loadbalancer']['tenant_id'] traffic_group = self.driver.service_to_traffic_group(service) traffic_group = '/Common/' + traffic_group service["traffic_group"] = traffic_group # create tenant folder folder_name = self.service_adapter.get_folder_name(tenant_id) LOG.debug("Creating tenant folder %s" % folder_name) for bigip in self.driver.get_config_bigips(): if not self.system_helper.folder_exists(bigip, folder_name): folder = self.service_adapter.get_folder(service) try: self.system_helper.create_folder(bigip, folder) except Exception as err: LOG.exception("Error creating folder %s" % (folder)) raise f5ex.SystemCreationException( "Folder creation error for tenant %s" % (tenant_id)) if not self.driver.disconnected_service.network_exists( bigip, folder_name): try: self.driver.disconnected_service.create_network( bigip, folder_name) except Exception as err: LOG.exception("Error creating disconnected network %s." % (folder_name)) raise f5ex.SystemCreationException( "Disconnected network create error for tenant %s" % (tenant_id)) # create tenant route domain if self.conf.use_namespaces: for bigip in self.driver.get_all_bigips(): if not self.network_helper.route_domain_exists(bigip, folder_name): try: self.network_helper.create_route_domain( bigip, folder_name, self.conf.f5_route_domain_strictness) except Exception as err: LOG.exception(err.message) raise f5ex.RouteDomainCreationException( "Failed to create route domain for " "tenant in %s" % (folder_name)) def assure_tenant_cleanup(self, service, all_subnet_hints): """Delete tenant partition.""" # Called for every bigip only in replication mode, # otherwise called once. for bigip in self.driver.get_config_bigips(): subnet_hints = all_subnet_hints[bigip.device_name] self._assure_bigip_tenant_cleanup(bigip, service, subnet_hints) # called for every bigip only in replication mode. 
# otherwise called once def _assure_bigip_tenant_cleanup(self, bigip, service, subnet_hints): tenant_id = service['loadbalancer']['tenant_id'] self._remove_tenant_replication_mode(bigip, tenant_id) def _remove_tenant_replication_mode(self, bigip, tenant_id): # Remove tenant in replication sync-mode partition = self.service_adapter.get_folder_name(tenant_id) domain_names = self.network_helper.get_route_domain_names(bigip, partition) for domain_name in domain_names: try: self.network_helper.delete_route_domain(bigip, partition, domain_name) except Exception as err: LOG.error("Failed to delete route domain %s. " "%s. Manual intervention might be required." % (domain_name, err.message)) if self.driver.disconnected_service.network_exists( bigip, partition): try: self.driver.disconnected_service.delete_network(bigip, partition) except Exception as err: LOG.error("Failed to delete disconnected network %s. " "%s. Manual intervention might be required." % (partition, err.message)) try: self.system_helper.delete_folder(bigip, partition) except Exception: LOG.error( "Folder deletion exception for tenant partition %s occurred." % tenant_id) try: self.system_helper.purge_folder_contents(bigip, partition) self.system_helper.delete_folder(bigip, partition) except Exception as err: LOG.exception("%s" % err.message) raise f5ex.SystemDeleteException( "Failed to destroy folder %s manual cleanup might be " "required." % partition)
Python
0
@@ -2430,32 +2430,24 @@ - bigip, folde @@ -2705,17 +2705,16 @@ ork %25s.%22 - %25%0A @@ -2837,20 +2837,16 @@ - %22Disconn @@ -2881,36 +2881,32 @@ or tenant %25s%22 %25%0A -
fc7f51877b6b991ad5a25afb755dd7a35e91dfea
Use get_or_create to avoid duplicate objects
cla_backend/apps/legalaid/migrations/0022_default_contact_for_research_methods.py
cla_backend/apps/legalaid/migrations/0022_default_contact_for_research_methods.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations
import uuid

from cla_common.constants import RESEARCH_CONTACT_VIA


def create_default_contact_for_research_methods(apps, schema_editor):
    ContactResearchMethods = apps.get_model("legalaid", "ContactResearchMethod")
    for value, name in RESEARCH_CONTACT_VIA:
        ContactResearchMethods.objects.create(method=value, reference=uuid.uuid4()).save()


def rollback_default_contact_for_research_methods(apps, schema_editor):
    pass


class Migration(migrations.Migration):
    dependencies = [("legalaid", "0021_auto_20190515_1042")]

    operations = [
        migrations.RunPython(
            create_default_contact_for_research_methods, rollback_default_contact_for_research_methods
        )
    ]
Python
0.000001
@@ -394,16 +394,23 @@ objects. +get_or_ create(m @@ -422,16 +422,27 @@ =value, +defaults=%7B%22 referenc @@ -442,17 +442,19 @@ eference -= +%22: uuid.uui @@ -461,15 +461,9 @@ d4() -).save( +%7D )%0A%0A%0A
d8300217d5a65465144772000e8fcbe2279758f8
Make _markdown property private on MarkdownDocumentFormat.
grow/documents/document_format.py
grow/documents/document_format.py
"""Document formatting specifics for parsing and working with documents.""" import copy import markdown from markdown.extensions import tables from grow.common import markdown_extensions from grow.common import structures from grow.common import utils from grow.documents import document_front_matter as doc_front_matter BOUNDARY_SEPARATOR = '---' class Error(Exception): pass class BadFormatError(Error, ValueError): pass class BadLocalesError(BadFormatError): pass class DocumentFormat(object): """ Document formatting specifics for parsing and working with documents. Defines how to handle documents formatted in various syntax formats. """ def __init__(self, doc): self._doc = doc self._content = None self._raw_content = None @staticmethod def from_doc(*args, **kwargs): doc = kwargs.get('doc', None) if not doc: raise BadFormatError( 'Missing `doc` keyword argument for creating format') if doc.ext in ('.html', '.htm'): return HtmlDocumentFormat(*args, **kwargs) if doc.ext in ('.markdown', '.mdown', '.mkdn', '.mkd', '.md'): return MarkdownDocumentFormat(*args, **kwargs) if doc.ext in ('.yaml', '.yml'): return YamlDocumentFormat(*args, **kwargs) return TextDocumentFormat(*args, **kwargs) @staticmethod def format_doc(front_matter, content): if front_matter and content: return '{0}\n{1}\n{0}\n{2}\n'.format( BOUNDARY_SEPARATOR, front_matter.strip(), content.strip()) elif front_matter: return '{}\n'.format(front_matter.strip()) return '{}\n'.format(content.strip()) def _parse_content(self): """Parse the content from the raw content.""" _, parsed_content = doc_front_matter.DocumentFrontMatter\ .split_front_matter(self.raw_content) return parsed_content def _parse_front_matter(self): """Parse the front matter from the raw content.""" return doc_front_matter.DocumentFrontMatter( self._doc) @property def content(self): """Lazy load the content after checking the content cache.""" if self._content: return self._content self._content = self._parse_content() return self._content @utils.cached_property def front_matter(self): cached_front_matter = self._doc.pod.podcache.document_cache\ .get_property(self._doc, 'front_matter') if cached_front_matter: return doc_front_matter.DocumentFrontMatter( self._doc, raw_front_matter=cached_front_matter) front_matter = self._parse_front_matter() self._doc.pod.podcache.document_cache.add_property( self._doc, 'front_matter', front_matter.export()) return front_matter @property def raw_content(self): if self._raw_content: return self._raw_content if self._doc.exists: self._raw_content = self._doc.pod.read_file(self._doc.pod_path) return self._raw_content @utils.cached_property def formatted(self): return self.content def to_raw_content(self): """Formats the front matter and content into a raw_content string.""" raw_front_matter = self.front_matter.export() return self.format_doc(raw_front_matter, self.content) def update(self, fields=utils.SENTINEL, content=utils.SENTINEL): """Updates content and frontmatter.""" if fields is not utils.SENTINEL: raw_front_matter = utils.dump_yaml(fields) self.front_matter.update_raw_front_matter(raw_front_matter) self._doc.pod.podcache.document_cache.add_property( self._doc, 'front_matter', self.front_matter.export()) if content is not utils.SENTINEL: self._content = content self._raw_content = self.to_raw_content() class HtmlDocumentFormat(DocumentFormat): @utils.cached_property def formatted(self): val = self.content return val.decode('utf-8') if val is not None else None class MarkdownDocumentFormat(DocumentFormat): def 
get_markdown_config(self): """Get the markdown config for all extensions.""" if 'markdown' in self._doc.pod.podspec: markdown_config = self._doc.pod.podspec.markdown if 'extensions' in markdown_config: return markdown_config['extensions'] return [] def get_ext_config(self, kind): """Get the markdown config for a specific extension.""" for extension in self.get_markdown_config(): if extension.get('kind', '') != kind: continue return structures.AttributeDict(extension) return structures.AttributeDict({}) @utils.cached_property def markdown(self): extension_configs = {} extensions = [ tables.TableExtension(), markdown_extensions.TocExtension(pod=self._doc.pod), markdown_extensions.CodeBlockExtension(self._doc.pod), markdown_extensions.IncludeExtension(self._doc.pod), markdown_extensions.UrlExtension(self._doc.pod), 'markdown.extensions.fenced_code', 'markdown.extensions.codehilite', ] for config in self.get_markdown_config(): if config['kind'] in extensions: continue if config['kind'].startswith('markdown.extensions'): extensions.append(config['kind']) ext_config = copy.deepcopy(config) ext_config.pop('kind', None) if ext_config: extension_configs[config['kind']] = ext_config config = self.get_ext_config('markdown.extensions.codehilite') codehilite_config = { 'pygments_style': 'default', 'noclasses': True, 'css_class': 'code', } if 'theme' in config: codehilite_config['pygments_style'] = config.theme if 'classes' in config: codehilite_config['noclasses'] = not config.classes if 'class_name' in config: codehilite_config['css_class'] = config.class_name extension_configs['markdown.extensions.codehilite'] = codehilite_config md = markdown.Markdown(extensions=extensions, extension_configs=extension_configs) html = md.convert(self.content.decode('utf-8')) if self.content else None return md, html @utils.cached_property def toc(self): return self.markdown.toc @utils.cached_property def formatted(self): md, html = self.markdown return html class TextDocumentFormat(DocumentFormat): pass class YamlDocumentFormat(DocumentFormat): def _parse_content(self): return None def _parse_front_matter(self): return doc_front_matter.DocumentFrontMatter( self._doc, raw_front_matter=self.raw_content)
Python
0
@@ -4937,16 +4937,17 @@ def +_ markdown @@ -6691,28 +6691,33 @@ -return +md, html = self. +_ markdown .toc @@ -6712,16 +6712,34 @@ markdown +%0A return md .toc%0A%0A @@ -6812,16 +6812,17 @@ = self. +_ markdown
c8ccee311b5939b116987c1a6192cc2935f9ff3b
test bad query op
test/test_utility_functions.py
test/test_utility_functions.py
import testutils import json import psycopg2 class TestSplitQueries(testutils.BedquiltTestCase): def _assert_examples(self, examples): for query, match, specials in examples: result = self._query(""" select * from bq_split_queries('{}'::jsonb) """.format(json.dumps(query))) self.assertEqual(json.loads(result[0][0]), match) self.assertEqual(result[0][1], specials) def test_simple_queries_with_no_specials(self): examples = [ ({'a': {'b': 1}}, {'a': {'b': 1}}, []), ({'a': 1, 'b': 2}, {'a': 1, 'b': 2}, []) ] self._assert_examples(examples) def test_advanced_queries(self): examples = [ ( { 'a': { 'b': { '$eq': 22 } } }, {}, ["and bq_jdoc #> '{a,b}' = '22'::jsonb"] ), ( { 'a': { 'b': { '$eq': 22 } }, 'c': 44 }, { 'c': 44 }, ["and bq_jdoc #> '{a,b}' = '22'::jsonb"] ), ( { 'a': { 'b': { '$eq': 22 }, 'c': 44 } }, { 'a': {'c': 44} }, ["and bq_jdoc #> '{a,b}' = '22'::jsonb"] ) ] self._assert_examples(examples) def test_supported_ops(self): examples = [ ( {'a': {'b': {'$eq': 42}}}, {}, ["and bq_jdoc #> '{a,b}' = '42'::jsonb"] ), ( {'a': {'b': {'$noteq': 42}}}, {}, ["and bq_jdoc #> '{a,b}' != '42'::jsonb"] ), ( {'a': {'b': {'$gte': 42}}}, {}, ["and bq_jdoc #> '{a,b}' >= '42'::jsonb"] ), ( {'a': {'b': {'$gt': 42}}}, {}, ["and bq_jdoc #> '{a,b}' > '42'::jsonb"] ), ( {'a': {'b': {'$lte': 42}}}, {}, ["and bq_jdoc #> '{a,b}' <= '42'::jsonb"] ), ( {'a': {'b': {'$lt': 42}}}, {}, ["and bq_jdoc #> '{a,b}' < '42'::jsonb"] ), ( {'a': {'b': {'$in': [22, 42]}}}, {}, ["and bq_jdoc #> '{a,b}' <@ '[22, 42]'::jsonb"] ), ] self._assert_examples(examples)
Python
0.998676
@@ -2997,28 +2997,346 @@ ._assert_examples(examples)%0A +%0A def test_bad_op(self):%0A query = %7B%0A 'a': %7B'$totallynotavalidop': 42%7D%0A %7D%0A with self.assertRaises(psycopg2.InternalError):%0A self.cur.execute(%22%22%22%0A select * from bq_split_queries('%7B%7D'::jsonb)%0A %22%22%22.format(json.dumps(query)))%0A self.conn.rollback()%0A
e570c537a8e6889732fdaec987b7676f3e065534
Fix recursive loop in PdfHexString.__repr__
gymnast/pdf_types/string_types.py
gymnast/pdf_types/string_types.py
""" PDF string-like objects """ import binascii import codecs import io from .common import PdfType from ..exc import PdfParseError, PdfError from ..pdf_codec import register_codec # Go ahead and register the codec here, I guess. register_codec() class PdfString(PdfType): """Base class from which all of our string-like classes will inherit""" def __lt__(self, other): return self._parsed_bytes.__lt__(other) def __le__(self, other): return self._parsed_bytes.__le__(other) def __eq__(self, other): return self._parsed_bytes.__eq__(other) def __ne__(self, other): return self._parsed_bytes.__ne__(other) def __gt__(self, other): return self._parsed_bytes.__gt__(other) def __ge__(self, other): return self._parsed_bytes.__ge__(other) def __bool__(self): return self._parsed_bytes.__bool__() def __bytes__(self): return self._parsed_bytes def __hash__(self): return self._parsed_bytes.__hash__() def __repr__(self): return self.__class__.__name__+"("+self.raw_bytes.__repr__()+")" def __init__(self, data): super(PdfString, self).__init__() self.raw_bytes = data self._parsed_bytes = self.parse_bytes(data) @staticmethod def parse_bytes(data): raise NotImplementedError class PdfLiteralString(str, PdfString): """PDF Literal strings""" def __new__(cls, data): try: string = cls._decode_bytes(cls.parse_bytes(data)) except UnicodeDecodeError: string = codecs.encode(cls.parse_bytes(data), 'hex_codec').decode() obj = str.__new__(cls, string) obj.__init__(data) return obj def __init__(self, data): PdfString.__init__(self, data) str.__init__(self) @staticmethod def _decode_bytes(data): """Detect the encoding method and return the decoded string""" # Are we UTF-16BE? Good. if data[:2] == '\xFE\xFF': return data.decode('utf_16_be') # If the string isn't UTF-16BE, it follows PDF standard encoding # described in Appendix D of the reference. return data.decode('pdf_doc') ESCAPES = {b'n' : b'\n', b'r' : b'\r', b't' : b'\t', b'b' : b'\b', b'f' : b'\f', b'(' : b'(', b')' : b')', b'\n' : b'', b'\r' : b''} @staticmethod def _parse_escape(data): r"""Handle escape sequences in literal PDF strings. This should be pretty straightforward except that there are line continuations, so \\n, \\r\n, and \\r are ignored. Moreover, actual newline characters are also valid. It's very stupid. See pp. 53-56 in the Reference if you want to be annoyed. 
Arguments: data - io.BytesIO-like object Returns the unescaped bytes""" e_str = data.read(1) try: val = PdfLiteralString.ESCAPES[e_str] except KeyError: # Not a normal escape, hopefully it's an octal if not e_str.isdigit(): print(e_str) raise PdfParseError('Invalid escape sequence in literal string') else: # Handle \\r\n by skipping the next character if e_str == b'\r' and data.peek(1)[:1] == b'\n': data.seek(1,1) return val # If it's not one of the above, it must be an octal of # length at most 3 for i in range(2): e_str += data.read(1) if not e_str.isdigit(): data.seek(-1, 1) return bytes((min(int(e_str[:-1], 8),255),)) return bytes((min(int(e_str, 8),255),)) @staticmethod def parse_bytes(data): """Extract a PDF escaped string into a nice python bytes object.""" iodata = io.BufferedReader(io.BytesIO(data)) result = io.BytesIO() char = iodata.read(1) while char: if char == b'\\': result.write(PdfLiteralString._parse_escape(iodata)) else: result.write(char) char = iodata.read(1) return bytes(result.getvalue()) class PdfHexString(PdfString): """Hex strings, mostly used for ID values""" def __init__(self, data): super(PdfHexString, self).__init__(data) @property def _text(self): return '0x'+binascii.hexlify(self._parsed_bytes).decode() @staticmethod def parse_bytes(token): hstr = token.decode() if len(hstr) % 2: hstr += '0' return codecs.decode(hstr, 'hex_codec') def __repr__(self): return str(self) def pdf_encode(self): return b'<'+self.raw_bytes+b'>' class PdfComment(PdfType, str): """Comments""" def __new__(cls, obj): if isinstance(obj, (bytes, bytearray)): obj = obj.decode(errors='surrogateescape') return str.__new__(cls, obj) def __init__(self, *args, **kwargs): PdfType.__init__(self) str.__init__(self) def pdf_encode(self): return b'%' + bytes(self.parsed_object) class PdfName(PdfType, str): """PDF name objects, mostly use for dict keys""" def __new__(cls, *args, **kwargs): return str.__new__(cls, *args, **kwargs) def __init__(self, *args, **kwargs): PdfType.__init__(self) str.__init__(self) @classmethod def from_token(cls, token): """Parse names by stripping the leading / and replacing instances of #YY with the character b'\\xYY' and decoding to unicode.""" try: name = bytearray(token[token[0] == '/':].encode()) except AttributeError: # Hopefully this means that it's bytes name = bytearray(token[token[:1] == b'/':]) hash_pos = name.find(b'#') while hash_pos > 0: try: new_char = bytes((int(name[hash_pos+1:hash_pos+3], 16),)) except ValueError: msg = 'Invalid hex code in name {} ({})' raise PdfError(msg.format(token, name[hash_pos:hash_pos+3])) name[hash_pos:hash_pos+3] = new_char hash_pos = name.find(b'#', hash_pos) return cls(bytes(name).decode())
Python
0.000114
@@ -4727,24 +4727,73 @@ n str(self)%0A + def __str__(self):%0A return self._text%0A def pdf_