column        type           min       max
commit        stringlengths  40        40
subject       stringlengths  1         3.25k
old_file      stringlengths  4         311
new_file      stringlengths  4         311
old_contents  stringlengths  0         26.3k
lang          stringclasses  3 values
proba         float64        0         1
diff          stringlengths  0         7.82k
ae3374305bad49c358a173e26490c5c90b219208
test for multiple open-read-close cycle
tests/multiple_readings.py
tests/multiple_readings.py
Python
0
@@ -0,0 +1,579 @@
+import serial
import struct
import time
import pandas as pd
import numpy as np


def measure():
    start_time = time.time()
    with serial.Serial('/dev/cu.usbmodem14121', 1000000, timeout=1) as inport:
        open_time = time.time()
        data = inport.read(100)
        read_time = time.time()
    close_time = time.time()
    return (open_time - start_time, read_time - open_time, close_time - read_time, len(data))

df = pd.DataFrame.from_records(
    (measure() for i in range(100)),
    columns=["open", "read", "close", "datalength"])

print(df)

print(df.describe())
85d58c1e6e2ee7eae46ca46e25ef7338b23eac89
fix from in search reply
tests/olpc-buddy-search.py
tests/olpc-buddy-search.py
""" test OLPC search buddy """ import dbus from servicetest import call_async, EventPattern from gabbletest import exec_test, make_result_iq, acknowledge_iq from twisted.words.xish import domish, xpath from twisted.words.protocols.jabber.client import IQ NS_OLPC_BUDDY_PROPS = "http://laptop.org/xmpp/buddy-properties" NS_OLPC_ACTIVITIES = "http://laptop.org/xmpp/activities" NS_OLPC_CURRENT_ACTIVITY = "http://laptop.org/xmpp/current-activity" NS_OLPC_ACTIVITY_PROPS = "http://laptop.org/xmpp/activity-properties" NS_OLPC_BUDDY = "http://laptop.org/xmpp/buddy" NS_OLPC_ACTIVITY = "http://laptop.org/xmpp/activity" NS_PUBSUB = "http://jabber.org/protocol/pubsub" NS_DISCO_INFO = "http://jabber.org/protocol/disco#info" NS_DISCO_ITEMS = "http://jabber.org/protocol/disco#items" NS_AMP = "http://jabber.org/protocol/amp" def test(q, bus, conn, stream): conn.Connect() _, iq_event, disco_event = q.expect_many( EventPattern('dbus-signal', signal='StatusChanged', args=[0, 1]), EventPattern('stream-iq', to=None, query_ns='vcard-temp', query_name='vCard'), EventPattern('stream-iq', to='localhost', query_ns=NS_DISCO_ITEMS)) acknowledge_iq(stream, iq_event.stanza) # announce Gadget service reply = make_result_iq(stream, disco_event.stanza) query = xpath.queryForNodes('/iq/query', reply)[0] item = query.addElement((None, 'item')) item['jid'] = 'gadget.localhost' stream.send(reply) # wait for Gadget disco#info query event = q.expect('stream-iq', to='gadget.localhost', query_ns=NS_DISCO_INFO) reply = make_result_iq(stream, event.stanza) query = xpath.queryForNodes('/iq/query', reply)[0] identity = query.addElement((None, 'identity')) identity['category'] = 'collaboration' identity['type'] = 'gadget' identity['name'] = 'OLPC Gadget' feature = query.addElement((None, 'feature')) feature['var'] = NS_OLPC_BUDDY feature = query.addElement((None, 'feature')) feature['var'] = NS_OLPC_ACTIVITY stream.send(reply) buddy_info_iface = dbus.Interface(conn, 'org.laptop.Telepathy.BuddyInfo') call_async(q, conn, 'RequestHandles', 1, ['bob@localhost']) event = q.expect('dbus-return', method='RequestHandles') handles = event.value[0] bob_handle = handles[0] call_async(q, buddy_info_iface, 'GetProperties', bob_handle) # wait for pubsub query event = q.expect('stream-iq', to='bob@localhost', query_ns=NS_PUBSUB) query = event.stanza assert query['to'] == 'bob@localhost' # send an error as reply reply = IQ(stream, 'error') reply['id'] = query['id'] reply['to'] = 'alice@localhost' reply['from'] = 'bob@localhost' stream.send(reply) # wait for buddy search query event = q.expect('stream-iq', to='gadget.localhost', query_ns=NS_OLPC_BUDDY) buddies = xpath.queryForNodes('/iq/query/buddy', event.stanza) assert len(buddies) == 1 buddy = buddies[0] assert buddy['jid'] == 'bob@localhost' # send reply to the search query reply = make_result_iq('stream', event.stanza) reply['from'] = 'index.jabber.laptop.org' reply['to'] = 'alice@localhost' query = xpath.queryForNodes('/iq/query', reply)[0] buddy = query.addElement((None, "buddy")) buddy['jid'] = 'bob@localhost' properties = buddy.addElement((NS_OLPC_BUDDY_PROPS, "properties")) property = properties.addElement((None, "property")) property['type'] = 'str' property['name'] = 'color' property.addContent('#005FE4,#00A0FF') stream.send(reply) event = q.expect('dbus-return', method='GetProperties') props = event.value[0] assert props == {'color': '#005FE4,#00A0FF' } if __name__ == '__main__': exec_test(test)
Python
0
@@ -3132,31 +3132,24 @@
 = '
-index.jabber.laptop.org
+gadget.localhost
 '
4e3644234fab9cb14a3d511b24bce3ed8a1446e0
Add in a minor testcase.
tests/scales/test_minor.py
tests/scales/test_minor.py
Python
0.000001
@@ -0,0 +1,1710 @@
+# Copyright (c) Paul R. Tagliamonte <[email protected]>, 2015
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.

from muse.scales.minor import NaturalMinorScale
from muse.tone import Tone

def take(it, count):
    for _ in range(count):
        yield next(it)


SCALE = ['B♭4', 'C4', 'C♯4', 'E♭4', 'F4', 'F♯4', 'G♯4', 'B♭5']


def test_scale_acending_iteratation():
    cs = NaturalMinorScale(Tone(100))  # Bb4
    series = list(take(cs.acending(), 8))
    assert [x._tone_name() for x in series] == SCALE


def test_scale_acending_iteratation():
    cs = NaturalMinorScale(Tone(1300))  # Bb5
    series = list(take(cs.decending(), 8))
    assert [x._tone_name() for x in series] == list(reversed(SCALE))
e98065e04cfd52bb369d3b07d29f37fb458baa91
add solution for Merge Intervals
src/mergeIntervals.py
src/mergeIntervals.py
Python
0
@@ -0,0 +1,692 @@
+# Definition for an interval.
# class Interval:
#     def __init__(self, s=0, e=0):
#         self.start = s
#         self.end = e


class Solution:
    # @param intervals, a list of Interval
    # @return a list of Interval

    def merge(self, intervals):
        if not intervals:
            return []
        res = []
        intervals = sorted([[i.start, i.end] for i in intervals])
        prev = intervals[0]
        for i in xrange(1, len(intervals)):
            if intervals[i][0] <= prev[1]:
                prev[1] = max(prev[1], intervals[i][1])
            else:
                res.append(prev)
                prev = intervals[i]
        res.append(prev)
        return res
6f8c64ed6f99493811cab54137a1eed44d851260
Add python script to get group and module given a class name
scripts/GetGroupAndModuleFromClassName.py
scripts/GetGroupAndModuleFromClassName.py
Python
0.000003
@@ -0,0 +1,907 @@
+#!/usr/bin/env python

""" Given the path to the ITK Source Dir
print group and module of a given class

for instance, try:

  ./GetGroupAndModuleFromClassName /path/to/ITK Image
"""

import sys
import os

itk_dir = sys.argv[1]
cmakefile = os.path.join( itk_dir, 'CMake', 'UseITK.cmake' )

if not os.path.exists( cmakefile ):
    print 'Error: wrong path'
else:
    class_name = sys.argv[2]
    path = ''

    for root, dirs, files in os.walk( os.path.join( itk_dir, 'Modules' ) ):
        for f in files:
            if f == 'itk' + class_name + '.h':
                path = root

    if len( path ) != 0:
        # let's extract the Group
        temp = path.strip( os.path.join( itk_dir, 'Modules' ) )
        temp = temp.strip( 'include' )

        GroupModule = temp.split( '/' )
        print 'Group: ' + GroupModule[ 0 ]
        print 'Module: ' + GroupModule[ 1 ]

    else:
        print 'Error: this class is not part of itk'
3ff18745a561ab28e04d9218e00fc0aa367631f5
add `solution` module
src/obpds/solution.py
src/obpds/solution.py
Python
0.000001
@@ -0,0 +1,1505 @@
+#
# Copyright (c) 2015, Scott J Maddox
#
# This file is part of Open Band Parameters Device Simulator (OBPDS).
#
# OBPDS is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OBPDS is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OBPDS. If not, see <http://www.gnu.org/licenses/>.
#
#############################################################################

import numpy


__all__ = ['Solution', 'EquilibriumSolution']


class Solution(object):
    pass

class FlatbandSolution(Solution):
    def __init__(self, T, N, x, Ev, Ec, Ei):
        self.T = T
        self.N = N
        self.x = x
        self.Ev = Ev
        self.Ec = Ec
        self.Ei = Ei

class EquilibriumSolution(Solution):
    def __init__(self, T, N, x, Na, Nd, Ev, Ec, Ei, psi, n, p):
        self.T = T
        self.N = N
        self.x = x
        self.Na = Na
        self.Nd = Nd
        self.Ev = Ev
        self.Ec = Ec
        self.Ei = Ei
        self.psi = psi
        self.n = n
        self.p = p
        self.Ef = numpy.zeros(N)
9a2f68d14ae2d576c59035c67ffa12c96b4f748a
Add provider tests
test_saau.py
test_saau.py
Python
0
@@ -0,0 +1,219 @@
+from saau.loading import load_image_providers, load_service_providers


def test_load_service_providers():
    assert load_service_providers(None)


def test_load_image_providers():
    assert load_image_providers(None)
018be657ea3e088b3116e8a78fe81713a2a30e29
Add tifftopdf, a frontend for tiff2pdf and tiffinfo.
tifftopdf.py
tifftopdf.py
Python
0
@@ -0,0 +1,2191 @@
+#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Author: R.F. Smith <[email protected]>
# 2012-06-29
#
# To the extent possible under law, Roland Smith has waived all copyright and
# related or neighboring rights to NAME. This work is published from the
# Netherlands. See http://creativecommons.org/publicdomain/zero/1.0/

'''Description.'''

import os
import sys
import subprocess
from multiprocessing import Pool, Lock

globallock = Lock()

def checkfor(args):
    """Make sure that a program necessary for using this script is available."""
    try:
        f = open('/dev/null')
        subprocess.call(args, stderr=subprocess.STDOUT, stdout=f)
        f.close()
    except:
        print "Required program '{}' not found! exiting.".format(args[0])
        sys.exit(1)

def process(fname):
    """Process the file named fname."""
    try:
        args = ['tiffinfo', fname]
        # Gather information about the TIFF file.
        txt = subprocess.check_output(args).split()
        if not txt[7] == 'Width:':
            raise ValueError
        width = float(txt[8])
        length = float(txt[11])
        xres = float(txt[13][:-1])
        yres = float(txt[14])
        # Create the output file name.
        if fname.endswith(('.tif', '.TIF')):
            outname = fname[:-4]
        elif fname.endswith(('.tiff', '.TIFF')):
            outname = fname[:-5]
        outname = outname.replace(' ', '_') + '.pdf'
        args = ['tiff2pdf', '-w', str(width/xres), '-l', str(length/xres),
                '-x', str(xres), '-y', str(yres), '-o', outname, fname]
        subprocess.call(args)
        globallock.acquire()
        print "File '{}' converted to '{}'.".format(fname, outname)
        globallock.release()

    except:
        globallock.acquire()
        print "Converting {} failed.".format(fname)
        globallock.release()

## This is the main program ##
if __name__ == '__main__':
    if len(sys.argv) == 1:
        path, binary = os.path.split(sys.argv[0])
        print "Usage: {} [file ...]".format(binary)
        sys.exit(0)
    checkfor(['tiffinfo'])
    checkfor(['tiff2pdf'])
    p = Pool()
    p.map(process, sys.argv[1:])
    p.close()
6dcbb2004271860b7d2e8bf0d12da46c925f151c
add a utility to show/set/clear software write protect on a lun
tools/swp.py
tools/swp.py
Python
0.000001
@@ -0,0 +1,1268 @@
+#!/usr/bin/env python
# coding: utf-8

#
# A simple example to show/set/clear the software write protect flag SWP
#
import sys

from pyscsi.pyscsi.scsi import SCSI
from pyscsi.pyscsi.scsi_device import SCSIDevice
from pyscsi.pyscsi import scsi_enum_modesense6 as MODESENSE6


def usage():
    print 'Usage: swp.py [--help] [--on|--off] <device>'


def main():
    swp_on = 0
    swp_off = 0
    i = 1
    while i < len(sys.argv):
        if sys.argv[i] == '--help':
            return usage()
        if sys.argv[i] == '--on':
            del sys.argv[i]
            swp_on = 1
            continue
        if sys.argv[i] == '--off':
            del sys.argv[i]
            swp_off = 1
            continue
        i += 1

    if len(sys.argv) < 2:
        return usage()

    device = sys.argv[1]

    sd = SCSIDevice(device)
    s = SCSI(sd)
    i = s.modesense6(page_code=MODESENSE6.PAGE_CODE.CONTROL).result

    if swp_on:
        i['mode_pages'][0]['swp'] = 1
        s.modeselect6(i)
        print 'Set SWP ON'
        return

    if swp_off:
        i['mode_pages'][0]['swp'] = 0
        s.modeselect6(i)
        print 'Set SWP OFF'
        return

    print 'SWP is %s' % ("ON" if i['mode_pages'][0]['swp'] else "OFF")

if __name__ == "__main__":
    main()
80d2fa29185e9c3c54ed1e173122bbe5a78624a4
Create tutorial4.py
tutorial4.py
tutorial4.py
Python
0
@@ -0,0 +1 @@
+
f8d4596db159f143d51c62ea2a097a72f9877ee6
Add test for clusqmgr
test/clusqmgr.py
test/clusqmgr.py
Python
0
@@ -0,0 +1,739 @@
+import unittest
from testbase import MQWebTest

class TestQueueActions(MQWebTest):

	def testInquire(self):

		data = self.getJSON('/api/clusqmgr/inquire/' + self.qmgr)

		self.assertFalse('mqweb' not in data, 'No mqweb data returned')

		if 'error' in data:
			self.assertFalse(True, 'Received a WebSphere MQ error:' + str(data['error']['reason']['code']))

		self.assertFalse('clusqmgrs' not in data, 'No clusqmgrs array returned')
		self.assertFalse(len(data['clusqmgrs']) == 0, 'No cluster information found')

		self.assertTrue(self.checkIds(data['clusqmgrs'][0]), 'There are unmapped Websphere MQ attributes')

suite = unittest.TestLoader().loadTestsFromTestCase(TestQueueActions)
unittest.TextTestRunner(verbosity=2).run(suite)
66443f49c932fba9203b878b7be5f8c1a99a4e9e
make pacbio like names
iron/utilities/rename_to_pacbio.py
iron/utilities/rename_to_pacbio.py
Python
0.02221
@@ -0,0 +1,968 @@
+#!/usr/bin/python
import sys,argparse
from SequenceBasics import FastaHandleReader, FastqHandleReader

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('input',help="Use - for STDIN")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument('--fasta',action='store_true')
    group.add_argument('--fastq',action='store_true')
    args = parser.parse_args()

    if args.input=='-': args.input = sys.stdin
    else: args.input= open(args.input)

    if args.fasta:
        args.input = FastaHandleReader(args.input)
    elif args.fastq:
        args.input = FastqHandleReader(args.input)
    z = 0
    while True:
        e = args.input.read_entry()
        if not e: break
        z+=1
        name = 'm150101_010101_11111_c111111111111111111_s1_p0/'+str(z)+'/ccs'
        if args.fastq:
            print '@'+name
            print e['seq']
            print '+'
            print e['qual']
        elif args.fasta:
            print '>'+name
            print e['seq']
if __name__=="__main__":
    main()
dddac1090fae15edb9a8d2a2781bb80989a0bc84
add eventrange control
pilot/control/eventrange.py
pilot/control/eventrange.py
Python
0
@@ -0,0 +1,2280 @@
+#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Authors:
# - Wen Guan, [email protected], 2018

import json
import Queue
import os
import time

from pilot.util import https
from pilot.util.config import config

import logging
logger = logging.getLogger(__name__)


def download_event_ranges(job, num_ranges=None):
    """
    Download event ranges

    :param job:
    :param num_ranges:

    :return: List of event ranges.
    """

    log = logger.getChild(str(job['PandaID']))

    try:
        if num_ranges == None:
            # ToBeFix num_ranges with corecount
            num_ranges = 1

        data = {'pandaID': job['PandaID'],
                'jobsetID': job['jobsetID'],
                'taskID': job['taskID'],
                'nRanges': num_ranges}

        log.info("Downloading new event ranges: %s" % data)
        res = https.request('{pandaserver}/server/panda/getEventRanges'.format(pandaserver=config.Pilot.pandaserver),
                            data=data)
        log.info("Downloaded event ranges: %s" % res)
        if res['StatusCode'] == 0 or str(res['StatusCode']) == '0':
            return res['eventRanges']

        return []
    except Exception, e:
        log.error("Failed to download event ranges: %s" % (e.get_detail()))
        return None


def update_event_ranges(job, event_ranges, version=1):
    """
    Update an event range on the Event Server

    :param event_ranges:
    """
    log = logger.getChild(str(job['PandaID']))

    log.info("Updating event ranges: %s" % event_ranges)

    try:
        if version:
            data = {'eventRanges': json.dumps(event_ranges), 'version': 1}
        else:
            data = {'eventRanges': json.dumps(event_ranges)}

        log.info("Updating event ranges: %s" % data)
        res = https.request('{pandaserver}/server/panda/updateEventRanges'.format(pandaserver=config.Pilot.pandaserver),
                            data=data)

        log.info("Updated event ranges status: %s" % res)
    except Exception, e:
        log.error("Failed to update event ranges: %s" % (e.get_detail()))
42d6f1d17ea0f0117a82eb1933a5150b5eb1e29a
add missing is_context_manager
pikos/_internal/util.py
pikos/_internal/util.py
Python
0.999274
@@ -0,0 +1,200 @@
+import inspect

def is_context_manager(obj):
    """ Check if the obj is a context manager """
    # FIXME: this should work for now.
    return hasattr(obj, '__enter__') and hasattr(obj, '__exit__')
c0a809ff79d90712a5074d208193ac9fd2af9901
Add haproxy parser
playback/cli/haproxy.py
playback/cli/haproxy.py
Python
0.000001
@@ -0,0 +1,2970 @@
+import sys
from playback.api import HaproxyInstall
from playback.api import HaproxyConfig
from playback.templates.haproxy_cfg import conf_haproxy_cfg
from playback.cliutil import priority

def install(args):
    try:
        target = HaproxyInstall(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
    except AttributeError as e:
        sys.stderr.write(e.message)
        sys.exit(1)
    target.install()

def config(args):
    try:
        target = HaproxyConfig(user=args.user, hosts=args.hosts.split(','), key_filename=args.key_filename, password=args.password)
    except AttributeError:
        sys.stderr.write('No hosts found. Please using --hosts param.')
        sys.exit(1)
    if args.upload_conf:
        target.upload_conf(args.upload_conf)
    if args.configure_keepalived:
        target.configure_keepalived(args.router_id, args.priority,
                args.state, args.interface, args.vip)

def gen_conf():
    with open('haproxy.cfg', 'w') as f:
        f.write(conf_haproxy_cfg)

@priority(12)
def make(parser):
    """provision HAProxy with Keepalived"""
    s = parser.add_subparsers(
        title='commands',
        metavar='COMMAND',
        help='description',
    )
    def install_f(args):
        install(args)
    install_parser = s.add_parser('install', help='install HAProxy')
    install_parser.set_defaults(func=install_f)

    def config_f(args):
        config(args)
    config_parser = s.add_parser('config', help='configure HAProxy')
    config_parser.add_argument('--upload-conf', help='upload configuration file to the target host',
            action='store', default=False, dest='upload_conf')
    config_parser.add_argument('--configure-keepalived', help='configure keepalived',
            action='store_true', default=False, dest='configure_keepalived')
    config_parser.add_argument('--router_id', help='Keepalived router id e.g. lb1',
            action='store', default=False, dest='router_id')
    config_parser.add_argument('--priority', help='Keepalived priority e.g. 150',
            action='store', default=False, dest='priority')
    config_parser.add_argument('--state', help='Keepalived state e.g. MASTER',
            action='store', default=False, dest='state')
    config_parser.add_argument('--interface', help='Keepalived binding interface e.g. eth0',
            action='store', default=False, dest='interface')
    config_parser.add_argument('--vip', help='Keepalived virtual ip e.g. CONTROLLER_VIP',
            action='store', default=False, dest='vip')
    config_parser.set_defaults(func=config_f)

    def gen_conf_f(args):
        gen_conf()
    gen_conf_parser = s.add_parser('gen-conf', help='generate the example configuration to the current location')
    gen_conf_parser.set_defaults(func=gen_conf_f)
acc5c52011db4c8edc615ae3e0cad9cea4fe58b8
Add basic test for filesystem observer source
spreadflow_observer_fs/test/test_source.py
spreadflow_observer_fs/test/test_source.py
Python
0
@@ -0,0 +1,2204 @@
+# -*- coding: utf-8 -*-
# pylint: disable=too-many-public-methods

"""
Integration tests for spreadflow filesystem observer source.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals

import copy

from bson import BSON
from datetime import datetime
from twisted.internet import defer

from mock import Mock
from testtools import TestCase, run_test_with
from testtools.twistedsupport import AsynchronousDeferredRunTest

from spreadflow_core.scheduler import Scheduler
from spreadflow_delta.test.matchers import MatchesSendDeltaItemInvocation

from spreadflow_observer_fs.source import FilesystemObserverSource


def _spawnProcess(processProtocol, executable, args=(), env={}, path=None, uid=None, gid=None, usePTY=0, childFDs=None):
    """
    Spawn process method signature.
    """

class SpreadflowSourceIntegrationTestCase(TestCase):
    """
    Integration tests for spreadflow filesystem observer source.
    """

    @run_test_with(AsynchronousDeferredRunTest)
    @defer.inlineCallbacks
    def test_source_process(self):
        source = FilesystemObserverSource('*.txt', '/some/directory')

        reactor = Mock()
        reactor.spawnProcess = Mock(spec=_spawnProcess)

        scheduler = Mock()
        scheduler.send = Mock(spec=Scheduler.send)

        # Attach source to the scheduler.
        yield source.attach(scheduler, reactor)
        self.assertEquals(reactor.spawnProcess.call_count, 1)

        # Simulate a message directed to the source.
        msg = {
            'port': 'default',
            'item': {
                'type': 'delta',
                'date': datetime(2010, 10, 20, 20, 10),
                'inserts': ['abcdefg'],
                'deletes': ['hiklmno'],
                'data': {
                    'abcdefg': {
                        'path': '/some/directory/xyz.txt'
                    }
                }
            }
        }

        matches = MatchesSendDeltaItemInvocation(copy.deepcopy(msg['item']), source)
        source.peer.dataReceived(BSON.encode(msg))
        self.assertEquals(scheduler.send.call_count, 1)
        self.assertThat(scheduler.send.call_args, matches)
ad519b5e919bcd0efe48219c684ca19e9ef55ddc
Add realpath() calls to Uv watchers where appropriate
powerline/lib/watcher/uv.py
powerline/lib/watcher/uv.py
# vim:fileencoding=utf-8:noet
from __future__ import (unicode_literals, division, absolute_import, print_function)

import os

from collections import defaultdict
from threading import RLock
from functools import partial
from threading import Thread

from powerline.lib.path import realpath


class UvNotFound(NotImplementedError):
	pass


pyuv = None


def import_pyuv():
	global pyuv
	if not pyuv:
		try:
			pyuv = __import__('pyuv')
		except ImportError:
			raise UvNotFound


class UvThread(Thread):
	daemon = True

	def __init__(self, loop):
		self.uv_loop = loop
		super(UvThread, self).__init__()

	def run(self):
		while True:
			self.uv_loop.run()

	def join(self):
		self.uv_loop.stop()
		return super(UvThread, self).join()


_uv_thread = None


def start_uv_thread():
	global _uv_thread
	if _uv_thread is None:
		loop = pyuv.Loop()
		_uv_thread = UvThread(loop)
		_uv_thread.start()
	return _uv_thread.uv_loop


class UvWatcher(object):
	def __init__(self):
		import_pyuv()
		self.watches = {}
		self.lock = RLock()
		self.loop = start_uv_thread()

	def watch(self, path):
		with self.lock:
			if path not in self.watches:
				try:
					self.watches[path] = pyuv.fs.FSEvent(
						self.loop,
						path,
						partial(self._record_event, path),
						pyuv.fs.UV_CHANGE | pyuv.fs.UV_RENAME
					)
				except pyuv.error.FSEventError as e:
					code = e.args[0]
					if code == pyuv.errno.UV_ENOENT:
						raise OSError('No such file or directory: ' + path)
					else:
						raise

	def unwatch(self, path):
		with self.lock:
			try:
				watch = self.watches.pop(path)
			except KeyError:
				return
		watch.close(partial(self._stopped_watching, path))

	def is_watching(self, path):
		with self.lock:
			return path in self.watches

	def __del__(self):
		try:
			lock = self.lock
		except AttributeError:
			pass
		else:
			with lock:
				while self.watches:
					path, watch = self.watches.popitem()
					watch.close(partial(self._stopped_watching, path))


class UvFileWatcher(UvWatcher):
	def __init__(self):
		super(UvFileWatcher, self).__init__()
		self.events = defaultdict(list)

	def _record_event(self, path, fsevent_handle, filename, events, error):
		with self.lock:
			self.events[path].append(events)
			if events | pyuv.fs.UV_RENAME:
				if not os.path.exists(path):
					self.watches.pop(path).close()

	def _stopped_watching(self, path, *args):
		self.events.pop(path, None)

	def __call__(self, path):
		with self.lock:
			events = self.events.pop(path, None)
		if events:
			return True
		if path not in self.watches:
			self.watch(path)
			return True


class UvTreeWatcher(UvWatcher):
	is_dummy = False

	def __init__(self, basedir, ignore_event=None):
		super(UvTreeWatcher, self).__init__()
		self.ignore_event = ignore_event or (lambda path, name: False)
		self.basedir = realpath(basedir)
		self.modified = True
		self.watch_directory(self.basedir)

	def watch_directory(self, path):
		os.path.walk(path, self.watch_one_directory, None)

	def watch_one_directory(self, arg, dirname, fnames):
		try:
			self.watch(dirname)
		except OSError:
			pass

	def _stopped_watching(self, path, *args):
		pass

	def _record_event(self, path, fsevent_handle, filename, events, error):
		if not self.ignore_event(path, filename):
			self.modified = True
			if events == pyuv.fs.UV_CHANGE | pyuv.fs.UV_RENAME:
				# Stat changes to watched directory are UV_CHANGE|UV_RENAME. It
				# is weird.
				pass
			elif events | pyuv.fs.UV_RENAME:
				if not os.path.isdir(path):
					self.unwatch(path)
				else:
					full_name = os.path.join(path, filename)
					if os.path.isdir(full_name):
						# For some reason mkdir and rmdir both fall into this
						# category
						self.watch_directory(full_name)

	def __call__(self):
		return self.__dict__.pop('modified', False)
Python
0
@@ -1070,32 +1070,56 @@
 ch(self, path):
+		path = realpath(path)
 		with self.lock
@@ -1120,16 +1120,16 @@
 f.lock:
-
 			if pa
@@ -1526,32 +1526,56 @@
 ch(self, path):
+		path = realpath(path)
 		with self.lock
@@ -1762,20 +1762,30 @@
 	return 
-path
+realpath(path)
  in self
@@ -2472,32 +2472,56 @@
 __(self, path):
+		path = realpath(path)
 		with self.lock
@@ -3008,20 +3008,30 @@
 th.walk(
-path
+realpath(path)
 , self.w
4d1c81af1d028b2d0fd58f8bab7e7e0246c04f3b
Create alternative_matching.py
hacker_rank/regex/grouping_and_capturing/alternative_matching.py
hacker_rank/regex/grouping_and_capturing/alternative_matching.py
Python
0.00001
@@ -0,0 +1,82 @@
+Regex_Pattern = r'^(Mr\.|Mrs\.|Ms\.|Dr\.|Er\.)[a-zA-Z]{1,}$'	# Do not delete 'r'.
978b03cc6c1e36b7ce94fbde7c46f3a633ccb4d0
fix import statement
netforce_account/netforce_account/models/import_statement.py
netforce_account/netforce_account/models/import_statement.py
# Copyright (c) 2012-2015 Netforce Co. Ltd.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.

from netforce.model import Model, fields, get_model
import csv
from io import StringIO
from netforce.database import get_active_db
import os
from datetime import *
from dateutil.relativedelta import *


class ImportStatement(Model):
    _name = "import.statement"
    _transient = True
    _fields = {
        "account_id": fields.Many2One("account.account", "Account", required=True, on_delete="cascade"),
        "date_start": fields.Date("From Date", required=True),
        "date_end": fields.Date("To Date", required=True),
        "file": fields.File("CSV File", required=True),
        "encoding": fields.Selection([["utf-8", "UTF-8"], ["tis-620", "TIS-620"]], "Encoding", required=True),
    }
    _defaults = {
        "encoding": "utf-8",
    }

    def import_data(self, ids, context={}):
        obj = self.browse(ids[0])
        acc_id = obj.account_id.id
        dbname = get_active_db()
        data = open(os.path.join("static", "db", dbname, "files", obj.file), "rb").read().decode(obj.encoding)
        found_delim = False
        for delim in (",", ";", "\t"):
            try:
                try:
                    rd = csv.reader(StringIO(data), delimiter=delim)
                except:
                    raise Exception("Invalid CSV file")
                headers = next(rd)
                headers = [h.strip() for h in headers]
                for h in ["Date", "Description", "Spent", "Received", "Balance"]:
                    if not h in headers:
                        raise Exception("Missing header: '%s'" % h)
                found_delim = True
                break
            except:
                pass
        if not found_delim:
            raise Exception("Failed to open CSV file")
        rows = [r for r in rd]
        if not rows:
            raise Exception("Statement is empty")
        formats = ["%Y-%m-%d", "%d/%m/%Y", "%m/%d/%Y", "%d/%m/%y", "%m/%d/%y", "%d-%m-%y", "%m-%d-%y"]
        date_fmt = None
        for fmt in formats:
            fmt_ok = True
            for row in rows:
                vals = dict(zip(headers, row))
                date = vals["Date"].strip()
                if not date:
                    continue
                try:
                    datetime.strptime(date, fmt)
                except:
                    fmt_ok = False
                    break
            if fmt_ok:
                date_fmt = fmt
                break
        if not date_fmt:
            raise Exception("Could not detect date format")
        lines = []
        for i, row in enumerate(rows):
            vals = dict(zip(headers, row))
            try:
                date = vals["Date"].strip()
                if not date:
                    raise Exception("Missing date")
                date = datetime.strptime(date, date_fmt).strftime("%Y-%m-%d")
                if date < obj.date_start:
                    raise Exception("Transaction date is before start date: %s" % date)
                if date > obj.date_end:
                    raise Exception("Transaction date is after end date: %s" % date)
                balance = vals["Balance"].strip().replace(",", "")
                if not balance:
                    raise Exception("missing balance")
                try:
                    balance = float(balance)
                except:
                    raise Exception("Failed to read Balance amount")
                description = vals.get("Description").strip()
                try:
                    spent = vals["Spent"].strip().replace(",", "")
                    spent = float(spent) if spent else 0
                except:
                    raise Exception("Failed to read Spent amount")
                try:
                    received = vals["Received"].strip().replace(",", "")
                    received = float(received) if received else 0
                except:
                    raise Exception("Failed to read Received amount")
                if not spent and not received:
                    raise Exception("No spent or received amount")
                if spent and received:
                    raise Exception("Can not have both Spent and Received amounts on the same line")
                line_vals = {
                    "date": date,
                    "balance": balance,
                    "description": description,
                    "spent": spent,
                    "received": received,
                }
                lines.append(line_vals)
            except Exception as e:
                raise Exception("Error on line %d (%s)" % (i + 2, e))
        if not lines:
            raise Exception("Empty statement")
        first_bal = lines[0]["balance"] + lines[0]["spent"] - lines[0]["received"]
        first_date = lines[0]["date"]
        res = get_model("account.statement.line").search(
            [["statement_id.account_id", "=", acc_id], ["date", "<", first_date]], order="date desc,id desc", limit=1)
        if res:
            prev_line = get_model("account.statement.line").browse(res[0])
            prev_bal = prev_line.balance
            if first_bal != prev_bal:
                raise Exception("Invalid balance: previous balance is %.2f" % prev_bal)
        st_vals = {
            "account_id": acc_id,
            "date_start": obj.date_start,
            "date_end": obj.date_end,
            "balance_start": first_bal,
            "lines": [("create", v) for v in lines],
        }
        stmt_id = get_model("account.statement").create(st_vals)
        return {
            "next": {
                "name": "statement",
                "mode": "page",
                "active_id": stmt_id,
            }
        }

    def onchange_account(self, context={}):
        data = context["data"]
        account_id = data["account_id"]
        acc = get_model("account.account").browse(account_id)
        if acc.statements:
            st = acc.statements[0]
            d = datetime.strptime(st.date_end, "%Y-%m-%d") + timedelta(days=1)
            data["date_start"] = d.strftime("%Y-%m-%d")
            data["date_end"] = (d + relativedelta(day=31)).strftime("%Y-%m-%d")
        else:
            data["date_start"] = (datetime.today() - relativedelta(day=1)).strftime("%Y-%m-%d")
            data["date_end"] = (datetime.today() + relativedelta(day=31)).strftime("%Y-%m-%d")
        return data

ImportStatement.register()
Python
0.000008
@@ -6206,16 +6206,20 @@
 if 
+abs(
 first_ba
@@ -6223,20 +6223,17 @@
 _bal
- != 
+ - 
 prev_bal
 :
@@ -6228,16 +6228,23 @@
 prev_bal
+)>0.001
 :
251a045b6dc10d2c4f7ff94ca10b052ce44b68fe
add country for parents
source_data/etl_tasks/transform_upload.py
source_data/etl_tasks/transform_upload.py
import pprint as pp

import xlrd
import pandas as pd

from django.db import IntegrityError
from django.core.exceptions import ValidationError
from django.core.exceptions import ObjectDoesNotExist
from django.conf import settings
from pandas.io.excel import read_excel

from source_data.models import *
from datapoints.models import DataPoint, Source, Office, Region


class DocTransform(object):

    def __init__(self,document_id,file_type):
        self.source_datapoints = []
        self.file_type = file_type
        self.document = Document.objects.get(id=document_id)
        self.file_path = settings.MEDIA_ROOT + str(self.document.docfile)
        self.df = self.create_df()

    def create_df(self):
        if self.file_path.endswith('.csv'):
            df = pd.read_csv(self.file_path)
        else:
            wb = xlrd.open_workbook(self.file_path)
            sheet = wb.sheets()[0]
            df = read_excel(self.file_path,sheet.name)
        return df

    def get_essential_columns(self):
        column_mapping = {
            'region':[],
            'Campaign':[]
        }
        header_list = [str(col) for col in self.df.columns.values]
        overrides = HeaderOverride.objects.filter(header_string__in=header_list)
        for o in overrides:
            try:
                print o.content_type.name
                column_mapping[o.content_type.name].append(o.header_string)
            except KeyError:
                pass
        return column_mapping


class RegionTransform(DocTransform):

    def validate(self):
        essential_columns = ['name','code','parent_name','region_type','country','lat','lon']
        df_cols = [col for col in self.df]
        intsct = list(set(essential_columns).intersection(df_cols))
        if sorted(essential_columns) == sorted(intsct):
            valid_df = self.df[essential_columns]
            return None,valid_df
        else:
            err = 'must have all of the following columns: ' + str(essential_columns)
            return err,None

    def insert_source_regions(self,valid_df):
        '''
        in this method we take a datframe go through and create a source
        region for each record.  If the source region exists ( the string
        exists ), we update that record with the new values.  After this, we
        do the same with the parent regions.  One fall back currently is that
        the only conflict resolution for this is the fact that newer records
        take precedence.  This means for instance that if you upload a region
        with no lon/lat it could override the same region that currently has
        lon/lat
        '''

        valid_df['region_name'] = valid_df['name'] # unable to access name attr directly... fix this
        parent_regions = []
        just_created, updated, errors = [],[],[]

        for row in valid_df.iterrows():
            row_data = row[1]
            parent_regions.append(row_data.parent_name)
            try:
                child_defaults = {
                    'region_code': row_data.code,\
                    'parent_name': row_data.parent_name,\
                    'region_type': row_data.region_type,\
                    'country': row_data.country,\
                    'lat': row_data.lat,\
                    'lon': row_data.lon,\
                    'document': self.document,\
                    'source_guid': row_data.region_name.encode('utf-8')}

                sr,created = SourceRegion.objects.get_or_create(
                    region_string = row_data.region_name,\
                    defaults= child_defaults)
            except UnicodeDecodeError as err:
                errors.append(row_data.region_name)

            if created == 1:
                just_created.append(sr)
            else: # update the row in the db with all of the new values.
                updated_sr = SourceRegion.objects.filter(id=sr.id).update(**child_defaults)
                updated.append(updated_sr)

        #############################
        ## Now process the parents ##
        #############################

        distinct_parent_regions = list(set(parent_regions))

        for reg in distinct_parent_regions:
            try:
                parent_defaults = {
                    'region_code': reg,\
                    'document': self.document,\
                    'source_guid': reg.encode('utf-8')}
            except UnicodeDecodeError as err:
                errors.append(row_data.region_name)

            parent_sr, created = SourceRegion.objects.get_or_create(
                region_string = reg,defaults = parent_defaults)

            if created == 1:
                just_created.append(parent_sr)
            else: # update the row in the db with all of the new values.
                updated_parent_sr = SourceRegion.objects.filter(id=parent_sr.id).\
                    update(**parent_defaults)
                updated.append(parent_sr)
Python
0.99998
@@ -4317,32 +4317,80 @@
 self.document,\
+ 'country': row_data.country
8033f8a033ddc38c3f1e2276c8c2b4f50c8360fb
Add Python template
src/template.py
src/template.py
Python
0.000001
@@ -0,0 +1,838 @@
+#!/usr/bin/env python
# -*- coding: UTF-8 -*-

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import os
import argparse

def main(filename=None):
    print("Hello world!")
    if os.path.isfile(filename) is not True:
        file_status = ' (file does not exist)'
    else:
        file_status = ''
    print("Input File = '{}'{}".format(filename,file_status))

    _, file_ext = os.path.splitext(filename)
    if not file_ext in ['.txt','.text']:
        print("File extension '{}' is invalid".format(file_ext))

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Starting Template for Python 3 Programs')
    parser.add_argument('file',help='Input file')
    args = parser.parse_args()

    main(args.file)
11603040c58e27ebb109275bd4454a54e0c61d42
Test examples
tests/acceptance/test_examples.py
tests/acceptance/test_examples.py
Python
0.000001
@@ -0,0 +1,1162 @@
+from typing import Dict

from miniworld.util import JSONConfig


# TODO: examples/batman_adv.json, problem is configurator
def test_snapshot_boot_single_scenario(image_path, runner):
    with runner() as r:
        for _ in range(5):
            scenario = JSONConfig.read_json_config('examples/nb_bridged_lan.json')  # type: Dict
            r.start_scenario(scenario)
            r.step()
            r.step()
            r.stop(hard=False)

# TODO: test stop/step
def test_snapshot_boot_multiple_scenarios(image_path, runner):
    with runner() as r:
        scenario = JSONConfig.read_json_config('examples/batman_adv.json')  # type: Dict
        r.start_scenario(scenario)
        for _ in range(5):
            r.step()
        r.stop(hard=False)

        scenario = JSONConfig.read_json_config('examples/nb_bridged_lan.json')  # type: Dict
        r.start_scenario(scenario)
        for _ in range(5):
            r.step()
        r.stop(hard=False)

        scenario = JSONConfig.read_json_config('examples/nb_bridged_wifi.json')  # type: Dict
        r.start_scenario(scenario)
        for _ in range(5):
            r.step()
        r.stop(hard=False)
d2c5462c5677d7674921f02687017f4128f219f7
Create while_loop_else.py
while_loop_else.py
while_loop_else.py
Python
0.000058
@@ -0,0 +1,105 @@
+// You can actually do a while loop that ends with an else //

while True:
    ...
    ...
    ...
    ...
else:
0322e1c51fe07cc9707a687ab309a00ca374a1af
Add a cleanup_test_data management command to remove old test data from dev and stage
moztrap/model/core/management/commands/cleanup_test_data.py
moztrap/model/core/management/commands/cleanup_test_data.py
Python
0
@@ -0,0 +1,1371 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

from datetime import datetime
from optparse import make_option

from django.core.management.base import BaseCommand

from moztrap.model.core import models as core_models
from moztrap.model.environments import models as env_models


class Command(BaseCommand):
    help = 'Deletes old test data'
    option_list = BaseCommand.option_list + (
        make_option('--permanent',
            action='store_true',
            dest='permanent',
            default=True,
            help='Permanently delete records?'),)

    def handle(self, *args, **options):
        for model in (core_models.Product,
                      env_models.Category,
                      env_models.Element,
                      env_models.Profile):
            obj_type = model._meta.object_name
            objects_to_delete = model.everything.filter(name__startswith='Test %s ' % obj_type)
            obj_count = objects_to_delete.count()
            objects_to_delete.delete(permanent=options['permanent'])
            self.stdout.write('%s: %s test %s object(s) deleted\n' %
                              (datetime.now().isoformat(), obj_count, obj_type))
e2004076b1e04df21d9122d94e8ac00776542483
Create new package. (#6044)
var/spack/repos/builtin/packages/r-allelicimbalance/package.py
var/spack/repos/builtin/packages/r-allelicimbalance/package.py
Python
0
@@ -0,0 +1,2759 @@
+##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class RAllelicimbalance(RPackage):
    """Provides a framework for allelic specific expression
    investigation using RNA-seq data."""

    homepage = "http://bioconductor.org/packages/AllelicImbalance/"
    url = "https://git.bioconductor.org/packages/AllelicImbalance"

    version('1.14.0', git='https://git.bioconductor.org/packages/AllelicImbalance', commit='35958534945819baafde0e13d1eb4d05a514142c')

    depends_on('[email protected]:3.4.9', when='@1.14.0')
    depends_on('r-genomicranges', type=('build', 'run'))
    depends_on('r-summarizedexperiment', type=('build', 'run'))
    depends_on('r-genomicalignments', type=('build', 'run'))
    depends_on('r-biocgenerics', type=('build', 'run'))
    depends_on('r-annotationdbi', type=('build', 'run'))
    depends_on('r-bsgenome', type=('build', 'run'))
    depends_on('r-variantannotation', type=('build', 'run'))
    depends_on('r-biostrings', type=('build', 'run'))
    depends_on('r-s4vectors', type=('build', 'run'))
    depends_on('r-iranges', type=('build', 'run'))
    depends_on('r-rsamtools', type=('build', 'run'))
    depends_on('r-genomicfeatures', type=('build', 'run'))
    depends_on('r-gviz', type=('build', 'run'))
    depends_on('r-lattice', type=('build', 'run'))
    depends_on('r-latticeextra', type=('build', 'run'))
    depends_on('r-gridextra', type=('build', 'run'))
    depends_on('r-seqinr', type=('build', 'run'))
    depends_on('r-genomeinfodb', type=('build', 'run'))
    depends_on('r-nlme', type=('build', 'run'))
483cdf6b4dd846d9da11788ae98d86d373fb5c49
add analyze script
app/lda/scripts/analyze.py
app/lda/scripts/analyze.py
Python
0.000001
@@ -0,0 +1,1476 @@
+from __future__ import print_function
import numpy as np
import sys
import pandas as pd

phi_path = '/users/wdai/bosen/app/lda/output/lda.S0.M4.T32/lda_out.phi'
num_topics = 100
num_words = 52210
top_k = 10
dict_path = '/users/wdai/bosen/app/lda/datasets/words_freq.tsv'
topk_file = '/users/wdai/bosen/app/lda/output/topk.tsv'

def read_dict():
    df = pd.read_csv(dict_path, sep='\t')
    min_occur = 10
    df = df[df['count'] >= min_occur]
    df = df[df['count'] <= 1e6]  # remove super frequent words
    print('# of words occuring at least 10 times:', len(df.index))
    words = df['word'].tolist()
    id = df['id'].as_matrix()
    # TODO(wdai): remap the word ID after truncation.
    return dict(zip(id, words))

if __name__ == '__main__':
    phi = np.zeros((num_topics, num_words))
    with open(phi_path, 'r') as f:
        lines = f.readlines()
        for topic, line in enumerate(lines):
            fields = [float(field.strip()) for field in line.split()]
            assert len(fields) == num_words, 'len(fields): %d vs num_words %d' % \
                (len(fields), num_words)
            phi[topic, :] = fields
    # top-k words
    #topk = np.zeros((num_topics, top_k))
    i2w = read_dict()
    with open(topk_file, 'w') as f:
        for t in range(num_topics):
            ind = np.argpartition(phi[t,:], -top_k, axis=0)[-top_k:]
            ind = ind[np.argsort(phi[t,ind])[::-1]]
            for n in ind:
                f.write('%s:%.2f\t' % (i2w[n], phi[t,n]))
            f.write('\n')
    print('Output top %d words to %s' % (top_k, topk_file))
fcb02edeb8fafa8c297d48edc8ebf6b389321430
add test
test_iris.py
test_iris.py
Python
0.000002
@@ -0,0 +1,1735 @@
+import unittest
from sklearn import datasets
from sklearn.utils.validation import check_random_state
from stacked_generalization import StackedClassifier, FWLSClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.linear_model import LogisticRegression, RidgeClassifier

from sklearn.tree.tree import SPARSE_SPLITTERS

class TestStackedClassfier(unittest.TestCase):
    def setUp(self):
        iris = datasets.load_iris()
        rng = check_random_state(0)
        perm = rng.permutation(iris.target.size)
        iris.data = iris.data[perm]
        iris.target = iris.target[perm]
        self.iris = iris

    def test_stacked_classfier(self):
        bclf = LogisticRegression(random_state=1)
        clfs = [RandomForestClassifier(n_estimators=50, criterion = 'gini', random_state=1),
                ExtraTreesClassifier(n_estimators=50, criterion = 'gini', random_state=2),
                ExtraTreesClassifier(n_estimators=40, criterion = 'gini', random_state=3),
                GradientBoostingClassifier(n_estimators=25, random_state=1),
                GradientBoostingClassifier(n_estimators=40, random_state=1),
                RidgeClassifier(random_state=1)]
        sl = StackedClassifier(bclf, clfs, n_folds=3, verbose=0, stack_by_proba=True, oob_score_flag=True)
        sl.fit(self.iris.data, self.iris.target)
        score = sl.score(self.iris.data, self.iris.target)
        self.assertGreater(score, 0.8, "Failed with score = {0}".format(score))
        self.assertGreater(score, 0.8, "Failed with score = {0}".format(sl.oob_score_))


if __name__ == '__main__':
    unittest.main()
f43ac6c526aceddea81c8fccf99e33bd7cb917c4
fix api wrapper
src/sentry/debug/middleware.py
src/sentry/debug/middleware.py
from __future__ import absolute_import

import json
import re

from debug_toolbar.toolbar import DebugToolbar
from django.conf import settings
from django.template.loader import render_to_string
from django.utils.encoding import force_text
from six.moves import _thread as thread


class ToolbarCache(object):
    def __init__(self):
        self._toolbars = {}

    def create(self, request):
        toolbar = DebugToolbar(request)
        self._toolbars[thread.get_ident()] = toolbar
        return toolbar

    def pop(self):
        return self._toolbars.pop(thread.get_ident(), None)

    def get(self):
        return self._toolbars.get(thread.get_ident(), None)

toolbar_cache = ToolbarCache()


class DebugMiddleware(object):
    _body_regexp = re.compile(re.escape('</body>'), flags=re.IGNORECASE)

    def show_toolbar_for_request(self, request):
        if not settings.SENTRY_DEBUGGER:
            return False
        if not request.is_superuser():
            return False
        if 'text/html' not in request.META.get('HTTP_ACCEPT', '*/*'):
            return False
        return True

    def show_toolbar_for_response(self, response):
        content_type = response['Content-Type']
        for type in ('text/html', 'application/json'):
            if type in content_type:
                return True
        return False

    def process_request(self, request):
        # Decide whether the toolbar is active for this request.
        if not self.show_toolbar_for_request(request):
            return

        toolbar = toolbar_cache.create(request)

        # Activate instrumentation ie. monkey-patch.
        for panel in toolbar.enabled_panels:
            panel.enable_instrumentation()

        # Run process_request methods of panels like Django middleware.
        response = None
        for panel in toolbar.enabled_panels:
            response = panel.process_request(request)
            if response:
                break
        return response

    def process_view(self, request, view_func, view_args, view_kwargs):
        toolbar = toolbar_cache.get()
        if not toolbar:
            return

        # Run process_view methods of panels like Django middleware.
        response = None
        for panel in toolbar.enabled_panels:
            response = panel.process_view(request, view_func, view_args, view_kwargs)
            if response:
                break

    def process_response(self, request, response):
        toolbar = toolbar_cache.pop()
        if not toolbar:
            return response

        if not self.show_toolbar_for_response(response):
            return response

        # Run process_response methods of panels like Django middleware.
        for panel in reversed(toolbar.enabled_panels):
            new_response = panel.process_response(request, response)
            if new_response:
                response = new_response

        # Deactivate instrumentation ie. monkey-unpatch. This must run
        # regardless of the response. Keep 'return' clauses below.
        # (NB: Django's model for middleware doesn't guarantee anything.)
        for panel in reversed(toolbar.enabled_panels):
            panel.disable_instrumentation()

        # Collapse the toolbar by default if SHOW_COLLAPSED is set.
        if 'djdt' in request.COOKIES:
            response.delete_cookie('djdt')

        try:
            content = force_text(response.content, encoding='utf-8')
        except UnicodeDecodeError:
            # Make sure we at least just return a response on an encoding issue
            return response

        if 'text/html' not in response['Content-Type']:
            if 'application/json' in response['Content-Type']:
                content = json.dumps(json.loads(content), indent=2)
            content = render_to_string('debug_toolbar/wrapper.html', {
                'content': content,
            })
            response['Content-Type'] = 'text/html'

        # Insert the toolbar in the response.
        bits = self._body_regexp.split(content)
        if len(bits) > 1:
            bits[-2] += toolbar.render_toolbar()
            content = '</body>'.join(bits)
        response.content = content
        response['Content-Length'] = len(content)
        return response
Python
0.000015
@@ -152,132 +152,207 @@
 ngo.
-template.loader import render_to_string
from django.utils.encoding import force_text
from six.moves import _thread as thread
+utils.encoding import force_text
from django.utils.html import escape
from six.moves import _thread as thread

WRAPPER = """
<!DOCTYPE html>
<html>
<body>
    <pre>{content}</pre>
</body>
</html>
"""
 


c
@@ -3847,16 +3847,17 @@
 dent=2)
+
@@ -3874,56 +3874,23 @@
 t = 
-render_to_string('debug_toolbar/wrapper.html', {
+WRAPPER.format(
 
@@ -3906,26 +3906,31 @@
-'
 content
-':
+=escape(
 content
+)
 ,
@@ -3939,17 +3939,16 @@
-}
 )
4a0af754fc32deb5f0330a239efdeb53645936e6
Simplify similarity connection management. (#5851)
src/sentry/similarity/index.py
src/sentry/similarity/index.py
from __future__ import absolute_import

import itertools
import time
from collections import Counter, defaultdict

from sentry.utils.iterators import chunked
from sentry.utils.redis import load_script

index = load_script('similarity/index.lua')


def band(n, value):
    assert len(value) % n == 0
    return list(chunked(value, len(value) / n))


def flatten(value):
    return list(itertools.chain.from_iterable(value))


class MinHashIndex(object):
    def __init__(self, cluster, namespace, signature_builder, bands, interval, retention):
        self.cluster = cluster
        self.namespace = namespace
        self.signature_builder = signature_builder
        self.bands = bands
        self.interval = interval
        self.retention = retention

    def __build_signatures(self, items):
        data = defaultdict(
            lambda: [Counter() for _ in xrange(self.bands)],
        )

        for idx, features in items:
            bands = map(
                ','.join,
                band(
                    self.bands,
                    map(
                        '{}'.format,
                        self.signature_builder(features),
                    ),
                )
            )

            for i, bucket in enumerate(bands):
                data[idx][i][bucket] += 1

        arguments = [len(data)]
        for idx, bands in data.items():
            arguments.append(idx)
            for buckets in bands:
                arguments.append(len(buckets))
                for bucket, count in buckets.items():
                    arguments.extend([
                        bucket,
                        count,
                    ])

        return arguments

    def classify(self, scope, items, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())

        arguments = [
            'CLASSIFY',
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
        ]

        arguments.extend(self.__build_signatures(items))

        return [
            [(item, float(score)) for item, score in result]
            for result in index(
                self.cluster.get_local_client_for_key(scope),
                [],
                arguments,
            )
        ]

    def compare(self, scope, key, indices, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())

        arguments = [
            'COMPARE',
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
            key,
        ]

        arguments.extend(indices)

        return [
            [(item, float(score)) for item, score in result]
            for result in index(
                self.cluster.get_local_client_for_key(scope),
                [],
                arguments,
            )
        ]

    def record(self, scope, key, items, timestamp=None):
        if not items:
            return  # nothing to do

        if timestamp is None:
            timestamp = int(time.time())

        arguments = [
            'RECORD',
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
            key,
        ]

        arguments.extend(self.__build_signatures(items))

        return index(
            self.cluster.get_local_client_for_key(scope),
            [],
            arguments,
        )

    def merge(self, scope, destination, items, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())

        arguments = [
            'MERGE',
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
            destination,
        ]

        for idx, source in items:
            arguments.extend([idx, source])

        return index(
            self.cluster.get_local_client_for_key(scope),
            [],
            arguments,
        )

    def delete(self, scope, items, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())

        arguments = [
            'DELETE',
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
        ]

        for idx, key in items:
            arguments.extend([idx, key])

        return index(
            self.cluster.get_local_client_for_key(scope),
            [],
            arguments,
        )

    def scan(self, scope, indices, batch=1000, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())

        arguments = [
            'SCAN',
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
        ]

        clients = map(
            self.cluster.get_local_client,
            self.cluster.hosts,
        )

        for client in clients:
            cursors = {idx: 0 for idx in indices}
            while cursors:
                requests = []
                for idx, cursor in cursors.items():
                    requests.append([idx, cursor, batch])

                responses = index(
                    client,
                    [],
                    arguments + flatten(requests),
                )

                for (idx, _, _), (cursor, chunk) in zip(requests, responses):
                    cursor = int(cursor)
                    if cursor == 0:
                        del cursors[idx]
                    else:
                        cursors[idx] = cursor

                    yield client, idx, chunk

    def flush(self, scope, indices, batch=1000, timestamp=None):
        for client, index, chunk in self.scan(scope, indices, batch, timestamp):
            if chunk:
                client.delete(*chunk)

    def export(self, scope, items, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())

        arguments = [
            'EXPORT',
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
        ]

        for idx, key in items:
            arguments.extend([idx, key])

        return index(
            self.cluster.get_local_client_for_key(scope),
            [],
            arguments,
        )

    def import_(self, scope, items, timestamp=None):
        if timestamp is None:
            timestamp = int(time.time())

        arguments = [
            'IMPORT',
            timestamp,
            self.namespace,
            self.bands,
            self.interval,
            self.retention,
            scope,
        ]

        for idx, key, data in items:
            arguments.extend([idx, key, data])

        return index(
            self.cluster.get_local_client_for_key(scope),
            [],
            arguments,
        )
Python
0.000002
@@ -1675,16 +1675,115 @@ uments%0A%0A + def _get_connection(self, scope):%0A return self.cluster.get_local_client_for_key(scope)%0A%0A def @@ -1820,32 +1820,32 @@ imestamp=None):%0A - if times @@ -2300,40 +2300,23 @@ elf. -cluster.get_local_client_for_key +_get_connection (sco @@ -2917,40 +2917,23 @@ elf. -cluster.get_local_client_for_key +_get_connection (sco @@ -3519,40 +3519,23 @@ elf. -cluster.get_local_client_for_key +_get_connection (sco @@ -4075,40 +4075,23 @@ elf. -cluster.get_local_client_for_key +_get_connection (sco @@ -4589,40 +4589,23 @@ elf. -cluster.get_local_client_for_key +_get_connection (sco @@ -6478,40 +6478,23 @@ elf. -cluster.get_local_client_for_key +_get_connection (sco @@ -7005,40 +7005,23 @@ elf. -cluster.get_local_client_for_key +_get_connection (sco
2d0f76538f8927a85a2c51b0b6c34f54c775b883
Add kmeans receiver
lexos/receivers/kmeans_receiver.py
lexos/receivers/kmeans_receiver.py
Python
0.001009
@@ -0,0 +1,320 @@ +from lexos.receivers.base_receiver import BaseReceiver%0A%0A%0Aclass KmeansOption:%0A def __init__(self,):%0A%0A%0Aclass KmeansReceiver(BaseReceiver):%0A def options_from_front_end(self) -%3E KmeansOption:%0A %22%22%22Get the Kmeans option from front end.%0A%0A :return: a KmeansOption object to hold all the options.%0A %22%22%22%0A
ab87f960ecb6f330f4574d2e8dc6b3d4cc96c40f
add solution for Spiral Matrix II
src/spiralMatrixII.py
src/spiralMatrixII.py
Python
0
@@ -0,0 +1,718 @@ +class Solution:%0A # @return a list of lists of integer%0A%0A def generateMatrix(self, n):%0A if n == 0:%0A return %5B%5D%0A dirs = %5B%5B0, 1%5D, %5B1, 0%5D, %5B0, -1%5D, %5B-1, 0%5D%5D%0A cur = cur_d = 0%0A cur_x = cur_y = 0%0A matrix = %5B%5B0 for col in xrange(n)%5D for row in xrange(n)%5D%0A while cur != n*n:%0A cur += 1%0A matrix%5Bcur_x%5D%5Bcur_y%5D = cur%0A nx = cur_x + dirs%5Bcur_d%5D%5B0%5D%0A ny = cur_y + dirs%5Bcur_d%5D%5B1%5D%0A if nx %3C 0 or ny %3C 0 or nx == n or ny == n or matrix%5Bnx%5D%5Bny%5D:%0A cur_d = (cur_d+1) %25 4%0A nx = cur_x + dirs%5Bcur_d%5D%5B0%5D%0A ny = cur_y + dirs%5Bcur_d%5D%5B1%5D%0A cur_x, cur_y = nx, ny%0A return matrix%0A
69a031db7d83254291349804ee5f59fe9972f181
Add simple jitclass example
examples/jitclass.py
examples/jitclass.py
Python
0.00001
@@ -0,0 +1,905 @@ +%22%22%22%0AA simple jitclass example.%0A%22%22%22%0A%0Aimport numpy as np%0Afrom numba import jitclass # import the decorator%0Afrom numba import int32, float32 # import the types%0A%0Aspec = %5B%0A ('value', int32), # a simple scalar field%0A ('array', float32%5B:%5D), # an array field%0A%5D%0A%0A%0A@jitclass(spec)%0Aclass Bag(object):%0A def __init__(self, value):%0A self.value = value%0A self.array = np.zeros(value, dtype=np.float32)%0A%0A @property%0A def size(self):%0A return self.array.size%0A%0A def increment(self, val):%0A for i in range(self.size):%0A self.array%5Bi%5D += val%0A return self.array%0A%0A%0Amybag = Bag(21)%0Aprint('isinstance(mybag, Bag)', isinstance(mybag, Bag))%0Aprint('mybag.value', mybag.value)%0Aprint('mybag.array', mybag.array)%0Aprint('mybag.size', mybag.size)%0Aprint('mybag.increment(3)', mybag.increment(3))%0Aprint('mybag.increment(6)', mybag.increment(6))%0A
7350f2f81988b19d79b368193ffbb32111b08f67
Update TimeDepExtinction.py
experimental_code/TimeDepExtinction.py
experimental_code/TimeDepExtinction.py
#!/usr/bin/env python
import argparse, os,sys
from numpy import *
import numpy as np
from scipy.special import gamma
from scipy.special import beta as f_beta
import platform, time
import csv
from scipy.special import gdtr, gdtrix
from scipy.special import betainc
import scipy.stats
np.set_printoptions(suppress=True)
np.set_printoptions(precision=3)
self_path=os.getcwd()

# make up data
s = np.random.uniform(1, 25, 10) # random speciation times
e = np.random.uniform(0, s, 10) # random extinction times

#OH# Weibull log PDF and CDF functions
def log_wei_pdf(x,W_scale,W_shape):
    # Log of Weibull pdf
    log_wei_pdf = log(W_shape/W_scale) + (W_shape-1)*log(x/W_scale) - (x/W_scale)**W_shape
    return log_wei_pdf

def wei_cdf(x,W_scale,W_shape):
    # Weibull cdf
    log_wei_cdf = 1 - exp(-(x/W_scale)**W_shape)
    return wei_cdf

#OH# BDwe likelihood (constant speciation rate and age dependent weibull extinction)
def BDwelik (l, m0, W_shape, W_scale):
    d = s-e
    birth_lik = len(s)*log(l)-sum(l*d) # log probability of speciation
    death_lik_de = sum(log(m0)+log_pdf_Weibull(e[e>0], W_shape, W_scale)) # log probability of death event
    dead_lik_wte = -sum(m0*wei_cdf(d,W_scale,W_shape)) # log probability of waiting time until death event
    lik = birth_lik + death_lik_de + death_lik_wte
    return lik

# prior
#OH# should this also be changed?
def prior_gamma(L,a=2,b=2):
    return sum(scipy.stats.gamma.logpdf(L, a, scale=1./b,loc=0))

# function to update parameters
def update_multiplier_proposal(i,d=1.2,f=1):
    S=shape(i)
    u = np.random.uniform(0,1,S)
    l = 2*log(d)
    m = exp(l*(u-.5))
    ii = i * m
    U=sum(log(m))
    return ii, U

# create log file
logfile = open("mcmc.log" , "wb")
wlog=csv.writer(logfile, delimiter='\t')
head = ["it","post","lik","prior","l","m","shape","scale"]
wlog.writerow(head)
logfile.flush()

iteration =0
sampling_freq =10

# init parameters
lA = 0.5
mA = 0.1
W_shapeA = 0.1 #OH# proposition of parameter, here starting with strong age-dependency, mainly high likelyhood for younger species
W_scaleA = 0.1 #OH# proposition of parameter

while True:
    # update parameters
    if np.random.random() >0.5:
        l, hastings = update_multiplier_proposal(lA)
        m = mA
        W_shape = W_shapeA
        W_scale = W_scaleA
    else:
        m, hastings = update_multiplier_proposal(mA)
        W_shape, hastings = update_multiplier_proposal(W_shapeA)
        W_scale, hastings = update_multiplier_proposal(W_scaleA)
        l = lA

    # calc lik
    lik = BDwelik(l, m, W_shape, W_scale)

    # calc priors
    prior = prior_gamma(l) + prior_gamma(m) + prior_gamma(W_shape) + prior_gamma(W_scale)

    if iteration ==0:
        likA = lik
        priorA = prior

    posterior_ratio = (prior+lik) - (priorA+likA)

    if posterior_ratio + hastings > log(np.random.random()):
        # accept state
        likA = lik
        priorA = prior
        lA = l
        mA = m
        W_shapeA = W_shape
        W_scaleA = W_scale

    if iteration % 100 ==0:
        print likA, priorA, lA, mA, W_shapeA, W_scaleA

    if iteration % sampling_freq ==0:
        log_state=[iteration,likA+priorA,likA,priorA,lA,mA,W_shapeA,W_scaleA]
        wlog.writerow(log_state)
        logfile.flush()

    iteration +=1
    if iteration==10000:
        break
Python
0
@@ -988,14 +988,14 @@ (l)- +l* sum( -l* d) #
5273a97ab1da4b809573617d3fc01705c322992f
Add tests for form mixin.
thecut/authorship/tests/test_forms.py
thecut/authorship/tests/test_forms.py
Python
0
@@ -0,0 +1,2497 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import absolute_import, unicode_literals%0Afrom django.test import TestCase%0Afrom django import forms%0Afrom mock import patch%0Afrom test_app.models import AuthorshipModel%0Afrom thecut.authorship.factories import UserFactory%0Afrom thecut.authorship.forms import AuthorshipMixin%0A%0A%0Aclass AuthorshipModelForm(AuthorshipMixin, forms.ModelForm):%0A%0A class Meta:%0A model = AuthorshipModel%0A fields = %5B%5D%0A%0A%0Aclass DummyUser(object):%0A%0A pass%0A%0A%0Aclass TestAuthorshipMixin(TestCase):%0A%0A def test_requires_an_extra_argument_on_creating_an_instance(self):%0A self.assertRaises(TypeError, AuthorshipModelForm)%0A%0A def test_sets_user_attribute(self):%0A%0A dummy_user = DummyUser()%0A%0A form = AuthorshipModelForm(user=dummy_user)%0A%0A self.assertEqual(dummy_user, form.user)%0A%0A%0Aclass DummyUnsavedModel(object):%0A%0A def __init__(self):%0A self.pk = None%0A%0A%0Aclass TestAuthorshipMixinSave(TestCase):%0A%0A @patch('django.forms.ModelForm.save')%0A def test_calls_super_class_save_method(self, superclass_save):%0A%0A form = AuthorshipModelForm(user=UserFactory())%0A form.instance = DummyUnsavedModel()%0A%0A form.save()%0A%0A self.assertTrue(superclass_save.called)%0A%0A @patch('django.forms.ModelForm.save')%0A def test_sets_updated_by_to_given_user(self, superclass_save):%0A user = DummyUser()%0A form = AuthorshipModelForm(user=user)%0A form.instance = DummyUnsavedModel()%0A form.cleaned_data = %7B%7D%0A%0A form.save()%0A%0A self.assertEqual(user, form.instance.updated_by)%0A%0A @patch('django.forms.ModelForm.save')%0A def test_sets_created_by_if_instance_is_not_saved(self, superclass_save):%0A user = DummyUser()%0A form = AuthorshipModelForm(user=user)%0A form.instance = DummyUnsavedModel()%0A form.cleaned_data = %7B%7D%0A%0A form.save()%0A%0A self.assertEqual(user, form.instance.created_by)%0A%0A @patch('django.forms.ModelForm.save')%0A def test_does_not_set_created_by_if_instance_is_saved(self,%0A superclass_save):%0A%0A class DummySavedModel(object):%0A%0A def __init__(self):%0A self.pk = 'arbitrary-value'%0A self.created_by = 'arbitrary-value'%0A%0A user = DummyUser()%0A form = AuthorshipModelForm(user=user)%0A form.instance = DummySavedModel()%0A form.cleaned_data = %7B%7D%0A%0A form.save()%0A%0A self.assertNotEqual(user, form.instance.created_by)%0A
e838b6d53f131badfbb7b51b4eb268ebb5d7c450
Add tests for using the new Entity ID tracking in the rule matcher
spacy/tests/matcher/test_entity_id.py
spacy/tests/matcher/test_entity_id.py
Python
0
@@ -0,0 +1,1777 @@ +from __future__ import unicode_literals%0Aimport spacy%0Afrom spacy.vocab import Vocab%0Afrom spacy.matcher import Matcher%0Afrom spacy.tokens.doc import Doc%0Afrom spacy.attrs import *%0A%0Aimport pytest%0A%0A%[email protected]%0Adef en_vocab():%0A return spacy.get_lang_class('en').Defaults.create_vocab()%0A%0A%0Adef test_init_matcher(en_vocab):%0A matcher = Matcher(en_vocab)%0A assert matcher.n_patterns == 0%0A assert matcher(Doc(en_vocab, words=%5Bu'Some', u'words'%5D)) == %5B%5D%0A%0A%0Adef test_add_empty_entity(en_vocab):%0A matcher = Matcher(en_vocab)%0A matcher.add_entity('TestEntity')%0A assert matcher.n_patterns == 0%0A assert matcher(Doc(en_vocab, words=%5Bu'Test', u'Entity'%5D)) == %5B%5D%0A%0A%0Adef test_get_entity_attrs(en_vocab):%0A matcher = Matcher(en_vocab)%0A matcher.add_entity('TestEntity')%0A entity = matcher.get_entity('TestEntity')%0A assert entity == %7B%7D %0A matcher.add_entity('TestEntity2', attrs=%7B'Hello': 'World'%7D)%0A entity = matcher.get_entity('TestEntity2')%0A assert entity == %7B'Hello': 'World'%7D %0A assert matcher.get_entity('TestEntity') == %7B%7D%0A%0A%0Adef test_get_entity_via_match(en_vocab):%0A matcher = Matcher(en_vocab)%0A matcher.add_entity('TestEntity', attrs=%7Bu'Hello': u'World'%7D)%0A assert matcher.n_patterns == 0%0A assert matcher(Doc(en_vocab, words=%5Bu'Test', u'Entity'%5D)) == %5B%5D%0A matcher.add_pattern(u'TestEntity', %5B%7BORTH: u'Test'%7D, %7BORTH: u'Entity'%7D%5D)%0A assert matcher.n_patterns == 1%0A matches = matcher(Doc(en_vocab, words=%5Bu'Test', u'Entity'%5D))%0A assert len(matches) == 1%0A assert len(matches%5B0%5D) == 4%0A ent_id, label, start, end = matches%5B0%5D%0A assert ent_id == matcher.vocab.strings%5Bu'TestEntity'%5D%0A assert label == 0%0A assert start == 0%0A assert end == 2%0A attrs = matcher.get_entity(ent_id)%0A assert attrs == %7Bu'Hello': u'World'%7D%0A%0A%0A%0A
2cf812ba2015bfcc392a2f401c253850b31060c7
Make sure all tags are alphanumeric
perf_insights/perf_insights/upload.py
perf_insights/perf_insights/upload.py
# Copyright (c) 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import sys
import webapp2
import uuid

from perf_insights import trace_info

sys.path.append('third_party')
import cloudstorage as gcs

default_retry_params = gcs.RetryParams(initial_delay=0.2,
                                       max_delay=5.0,
                                       backoff_factor=2,
                                       max_retry_period=15)
gcs.set_default_retry_params(default_retry_params)


class UploadPage(webapp2.RequestHandler):

  def get(self):
    self.response.out.write("""
<html><body>
  <head><title>Performance Insights - Trace Uploader</title></head>
  <form action="/upload" enctype="multipart/form-data" method="post">
    <div><input type="file" name="trace"/></div>
    <div><input type="submit" value="Upload"></div>
  </form><hr>
</body></html>""")

  def post(self):
    trace_uuid = str(uuid.uuid4())
    bucket_name = ('/performance-insights/' + trace_uuid)
    gcs_file = gcs.open(bucket_name,
                        'w',
                        content_type='application/octet-stream',
                        options={},
                        retry_params=default_retry_params)
    gcs_file.write(self.request.get('trace'))
    gcs_file.close()

    trace_object = trace_info.TraceInfo(id=trace_uuid)
    trace_object.prod = self.request.get('prod')
    trace_object.remote_addr = os.environ["REMOTE_ADDR"]
    tags_string = self.request.get('tags')
    if len(tags_string):
      trace_object.tags = tags_string.split(',')
    trace_object.user_agent = self.request.headers.get('User-Agent')
    trace_object.ver = self.request.get('product_version')
    trace_object.put()

    self.response.write(trace_uuid)

app = webapp2.WSGIApplication([('/upload', UploadPage)])
Python
0
@@ -170,16 +170,26 @@ port os%0A +import re%0A import s @@ -1666,25 +1666,81 @@ if -len(tags_string): +re.match('%5E%5Ba-zA-Z0-9,%5D+$', tags_string): # ignore non alpha-numeric tags %0A
a1eff713339d528720ed5999d05a85066018f070
Add visualise.py
visualise.py
visualise.py
Python
0.000369
@@ -0,0 +1,1477 @@ +# visualise.py%0D%0A%0D%0A# Imports%0D%0Aimport argparse%0D%0Aimport json%0D%0Aimport numpy as np%0D%0Aimport matplotlib.pyplot as plt%0D%0Afrom mpl_toolkits.mplot3d import Axes3D%0D%0A%0D%0Afrom uniform_bspline import Contour%0D%0A%0D%0A%0D%0A# main%0D%0Adef main():%0D%0A parser = argparse.ArgumentParser()%0D%0A parser.add_argument('input_path')%0D%0A parser.add_argument('--num-samples', type=int, default=1024)%0D%0A args = parser.parse_args()%0D%0A%0D%0A print 'Input:', args.input_path%0D%0A with open(args.input_path, 'rb') as fp:%0D%0A z = json.load(fp)%0D%0A%0D%0A degree, num_control_points, dim, is_closed = (%0D%0A z%5B'degree'%5D, z%5B'num_control_points'%5D, z%5B'dim'%5D, z%5B'is_closed'%5D)%0D%0A%0D%0A print ' degree:', degree%0D%0A print ' num_control_points:', num_control_points%0D%0A print ' dim:', dim%0D%0A print ' is_closed:', is_closed%0D%0A c = Contour(degree, num_control_points, dim, is_closed=is_closed)%0D%0A%0D%0A Y, w, u, X = map(np.array, %5Bz%5B'Y'%5D, z%5B'w'%5D, z%5B'u'%5D, z%5B'X'%5D%5D)%0D%0A print ' num_data_points:', Y.shape%5B0%5D%0D%0A%0D%0A kw = %7B%7D%0D%0A if Y.shape%5B1%5D == 3:%0D%0A kw%5B'projection'%5D = '3d'%0D%0A f = plt.figure()%0D%0A ax = f.add_subplot(111, **kw)%0D%0A ax.set_aspect('equal')%0D%0A def plot(X, *args, **kwargs):%0D%0A ax.plot(*(tuple(X.T) + args), **kwargs)%0D%0A%0D%0A plot(Y, 'ro')%0D%0A%0D%0A for m, y in zip(c.M(u, X), Y):%0D%0A plot(np.r_%5B'0,2', m, y%5D, 'k-')%0D%0A%0D%0A plot(X, 'bo--', ms=8.0)%0D%0A plot(c.M(c.uniform_parameterisation(args.num_samples), X), 'b-', lw=2.0)%0D%0A%0D%0A plt.show()%0D%0A%0D%0A%0D%0Aif __name__ == '__main__':%0D%0A main()%0D%0A
ee5089a6a16c5a6142444a0ad312fdb641aa845c
Fix tests
test/test.py
test/test.py
#!/usr/bin/env python
import locale
import os
import sys
import unittest
from tempfile import TemporaryFile

sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

from argparse import ArgumentParser
from argcomplete import *

IFS = '\013'


class TestArgcomplete(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        os.environ['_ARGCOMPLETE'] = "yes"
        os.environ['_ARC_DEBUG'] = "yes"
        os.environ['IFS'] = IFS

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def run_completer(self, parser, command, point=None):
        with TemporaryFile() as t:
            os.environ['COMP_LINE'] = command
            os.environ['COMP_POINT'] = point if point else str(len(command))
            with self.assertRaises(SystemExit):
                autocomplete(parser, output_stream=t, exit_method=sys.exit)
            t.seek(0)
            return t.read().decode(locale.getpreferredencoding()).split(IFS)

    def test_basic_completion(self):
        p = ArgumentParser()
        p.add_argument("--foo")
        p.add_argument("--bar")

        completions = self.run_completer(p, "prog ")
        assert(set(completions) == set(['-h', '--help', '--foo', '--bar']))

if __name__ == '__main__':
    unittest.main()
Python
0.000003
@@ -753,16 +753,75 @@ mmand))%0A + os.environ%5B'COMP_WORDBREAKS'%5D = '%22%5C'@%3E%3C=;%7C&(:'%0A
13e45a8578e57e2cb55b29980b0f3326dd393a20
Create sump_monitor.py
sump_monitor.py
sump_monitor.py
Python
0.000002
@@ -0,0 +1,1720 @@ +#Import the required modules%0Aimport RPi.GPIO as GPIO%0Aimport time%0Aimport requests%0Aimport math%0A%0A#Setup the GPIO%0AGPIO.setmode(GPIO.BCM)%0A%0A#Define the TRIG and ECO pins - these are labeled on the sensor%0ATRIG = 23%0AECHO = 24%0A%0A#Number of readings we are going to take to avoid issues%0Anumreadings = 7%0A%0A#Alert that we are starting the measurement%0Aprint %22Distance Measurement In Progress%22%0A%0A#Loop based on the above number%0Adistancearray=%5B%5D%0Acount = 0%0Awhile (count %3C numreadings):%0A #Setup the two pins for reading%0A GPIO.setup(TRIG,GPIO.OUT)%0A GPIO.setup(ECHO,GPIO.IN)%0A%0A GPIO.output(TRIG, False)%0A print %22Waiting For Sensor To Settle%22%0A time.sleep(2)%0A%0A GPIO.output(TRIG, True)%0A time.sleep(0.00001)%0A GPIO.output(TRIG, False)%0A%0A while GPIO.input(ECHO)==0:%0A pulse_start = time.time()%0A%0A while GPIO.input(ECHO)==1:%0A pulse_end = time.time()%0A%0A pulse_duration = pulse_end - pulse_start%0A%0A distance = pulse_duration * 17150%0A%0A distance = round(distance, 2)%0A%0A print %22Distance:%22,distance,%22cm%22%0A%0A distancearray.append(distance)%0A%0A count = count + 1%0A%0A#Get the half of the reading number and round up%0Amid = numreadings / 2%0Amid = int(math.ceil(mid))%0A%0A#Sort the array%0Adistancearray.sort()%0A%0A#Just for debugging%0Aprint distancearray%0Aprint distancearray%5Bmid%5D%0A%0A#Put the middle value back into the distance variable%0Adistance = distancearray%5Bmid%5D%0A%0A#Write the data to the influxdn instance%0Adata = 'environment,host=rpi1,location=basement,type=sumppump value=' + str(distance)%0Aprint data%0Aoutput = requests.post('http://192.168.9.42:8086/write?db=home', data=data)%0Aprint output%0A%0A#Release connections to the GPIO pins%0AGPIO.cleanup()%0A
9a6ca54f7cca0bd5f21f0bc590a034e7e3e05b6e
Add migration to add userprofiles to existing users
src/icp/apps/user/migrations/0002_add_userprofiles_to_existing_users.py
src/icp/apps/user/migrations/0002_add_userprofiles_to_existing_users.py
Python
0
@@ -0,0 +1,628 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.contrib.postgres.fields%0Afrom django.conf import settings%0A%0Adef create_user_profiles_for_existing_users(apps, schema_editor):%0A User = apps.get_model('auth', 'User')%0A UserProfile = apps.get_model('user', 'UserProfile')%0A%0A for user in User.objects.all():%0A UserProfile.objects.create(user=user)%0A %0A%0Aclass Migration(migrations.Migration):%0A %0A dependencies = %5B%0A ('user', '0001_initial')%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(create_user_profiles_for_existing_users)%0A %5D%0A
c2d658ed1caa91eb963a3df850b5cf9b99633f69
Add missing transpose.py
python/bifrost/transpose.py
python/bifrost/transpose.py
Python
0.006976
@@ -0,0 +1,1978 @@ +%0A# Copyright (c) 2016, The Bifrost Authors. All rights reserved.%0A# Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.%0A#%0A# Redistribution and use in source and binary forms, with or without%0A# modification, are permitted provided that the following conditions%0A# are met:%0A# * Redistributions of source code must retain the above copyright%0A# notice, this list of conditions and the following disclaimer.%0A# * Redistributions in binary form must reproduce the above copyright%0A# notice, this list of conditions and the following disclaimer in the%0A# documentation and/or other materials provided with the distribution.%0A# * Neither the name of The Bifrost Authors nor the names of its%0A# contributors may be used to endorse or promote products derived%0A# from this software without specific prior written permission.%0A#%0A# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS %60%60AS IS'' AND ANY%0A# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE%0A# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR%0A# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR%0A# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,%0A# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,%0A# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR%0A# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY%0A# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT%0A# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE%0A# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.%0A%0Afrom libbifrost import _bf, _check, _get, _string2space%0Afrom ndarray import asarray%0A%0Aimport ctypes%0A%0Adef transpose(dst, src, axes=None):%0A%09if axes is None:%0A%09%09axes = reversed(range(len(dst.shape)))%0A%09dst_bf = asarray(dst).as_BFarray()%0A%09src_bf = asarray(src).as_BFarray()%0A%09array_type = ctypes.c_int*src.ndim%0A%09axes_array = array_type(*axes)%0A%09_check(_bf.Transpose(src_bf, dst_bf, axes_array))%0A
f46b08ce3d45b44d3f71759705e8045322c6155d
Create __init__.py
pythainlp/spell/__init__.py
pythainlp/spell/__init__.py
Python
0.000429
@@ -0,0 +1,7 @@ +# TODO%0A
34001081c2cfaa86d85f7a5b51925dca4a6e1a9f
Use Python 3 type syntax in `zerver/webhooks/yo/view.py`.
zerver/webhooks/yo/view.py
zerver/webhooks/yo/view.py
# Webhooks for external integrations.
from typing import Optional

import ujson
from django.http import HttpRequest, HttpResponse

from zerver.decorator import api_key_only_webhook_view
from zerver.lib.actions import check_send_private_message
from zerver.lib.request import REQ, has_request_variables
from zerver.lib.response import json_success
from zerver.models import UserProfile, get_user


@api_key_only_webhook_view('Yo')
@has_request_variables
def api_yo_app_webhook(request, user_profile, email=REQ(default=""),
                       username=REQ(default='Yo Bot'), topic=REQ(default=None),
                       user_ip=REQ(default=None)):
    # type: (HttpRequest, UserProfile, str, str, Optional[str], Optional[str]) -> HttpResponse
    body = ('Yo from %s') % (username,)
    receiving_user = get_user(email, user_profile.realm)
    check_send_private_message(user_profile, request.client, receiving_user, body)
    return json_success()
Python
0
@@ -475,16 +475,29 @@ (request +: HttpRequest , user_p @@ -506,16 +506,59 @@ file -, email= +: UserProfile,%0A email: str = REQ( @@ -601,17 +601,24 @@ username -= +: str = REQ(defa @@ -635,15 +635,55 @@ t'), - topic= +%0A topic: Optional%5Bstr%5D = REQ( @@ -731,106 +731,43 @@ r_ip -=REQ(default=None)):%0A # type: (HttpRequest, UserProfile, str, str, Optional%5Bstr%5D, Optional%5Bstr%5D +: Optional%5Bstr%5D = REQ(default=None) ) -%3E @@ -779,17 +779,17 @@ Response -%0A +: %0A bod
8c98d12a08617b9a1ab1a264b826f5e9046eca05
Add getHWND/getAllWindows utility functions for bots.
assisstant/bots/utility.py
assisstant/bots/utility.py
Python
0
@@ -0,0 +1,641 @@ +import subprocess%0A%0A# criteria: dictionary that has key/values to match against.%0A# e.g. %7B%22wm_class%22: %22Navigator.Firefox%22%7D%0Adef getHWND(criteria):%0A windows = getAllWindows()%0A for window in windows:%0A if criteria.items() %3C= window.items():%0A return window%0A return None%0A%0A%0Adef getAllWindows():%0A windows = %5B%5D%0A with subprocess.Popen(%5B%22wmctrl%22, %22-l%22, %22-p%22, %22-x%22%5D, stdout=subprocess.PIPE, bufsize=1, universal_newlines=True) as p:%0A for line in p.stdout:%0A tokens = line.split()%0A windows.append(%7B%22hwnd%22: tokens%5B0%5D, %22workspace%22: tokens%5B1%5D, %22pid%22: tokens%5B2%5D, %22wm_class%22: tokens%5B3%5D, %22title%22: %22 %22.join(tokens%5B5:%5D)%7D)%0A return windows%0A
d9133f865c8f0c64e589e902c88a8e85feb77963
remove call to the deleted function.
tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py
tensorflow/lite/micro/tools/make/fix_arduino_subfolders.py
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Moves source files to match Arduino library conventions."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import glob
import os

import six


def rename_example_subfolder_files(library_dir):
  """Moves source files in example subfolders to equivalents at root."""
  patterns = ['*.h', '*.cpp', '*.c']
  for pattern in patterns:
    search_path = os.path.join(library_dir, 'examples/*/*', pattern)
    for source_file_path in glob.glob(search_path):
      source_file_dir = os.path.dirname(source_file_path)
      source_file_base = os.path.basename(source_file_path)
      new_source_file_path = source_file_dir + '_' + source_file_base
      os.rename(source_file_path, new_source_file_path)


def move_person_data(library_dir):
  """Moves the downloaded person model into the examples folder."""
  old_person_data_path = os.path.join(
      library_dir, 'src/tensorflow/lite/micro/tools/make/downloads/' +
      'person_model_int8/person_detect_model_data.cpp')
  new_person_data_path = os.path.join(
      library_dir, 'examples/person_detection/person_detect_model_data.cpp')
  if os.path.exists(old_person_data_path):
    os.rename(old_person_data_path, new_person_data_path)
    # Update include.
    with open(new_person_data_path, 'r') as source_file:
      file_contents = source_file.read()
    file_contents = file_contents.replace(
        six.ensure_str('#include "tensorflow/lite/micro/examples/' +
                       'person_detection/person_detect_model_data.h"'),
        '#include "person_detect_model_data.h"')
    with open(new_person_data_path, 'w') as source_file:
      source_file.write(file_contents)


def rename_example_main_inos(library_dir):
  """Makes sure the .ino sketch files match the example name."""
  search_path = os.path.join(library_dir, 'examples/*', 'main.ino')
  for ino_path in glob.glob(search_path):
    example_path = os.path.dirname(ino_path)
    example_name = os.path.basename(example_path)
    new_ino_path = os.path.join(example_path, example_name + '.ino')
    os.rename(ino_path, new_ino_path)


def main(unparsed_args):
  """Control the rewriting of source files."""
  library_dir = unparsed_args[0]
  rename_example_subfolder_files(library_dir)
  rename_example_main_inos(library_dir)
  move_person_data(library_dir)
  move_image_data_experimental(library_dir)


def parse_args():
  """Converts the raw arguments into accessible flags."""
  parser = argparse.ArgumentParser()
  _, unparsed_args = parser.parse_known_args()
  main(unparsed_args)


if __name__ == '__main__':
  parse_args()
Python
0
@@ -3075,52 +3075,8 @@ ir)%0A - move_image_data_experimental(library_dir)%0A %0A%0Ade
4912c8261dba456e8e4a62051afdf01565f20ae9
Add first iteration of raw_to_average_jpegs.py.
raw_to_average_jpegs.py
raw_to_average_jpegs.py
Python
0.000001
@@ -0,0 +1,1640 @@ +#! /usr/bin/env python%0A#%0A# Tested on Macs. First run %60brew install ufraw exiftool%60%0A%0Aimport argparse%0Aimport glob%0Aimport multiprocessing as mp%0Aimport os%0Aimport subprocess%0A%0A%0Adef parseArgs():%0A desc = 'Auto-white-balance raw images and create average-sized JPEG files with their EXIF info.'%0A parser = argparse.ArgumentParser(description=desc)%0A parser.add_argument('-p', '--path', dest='imagesPath', default=os.getcwd(),%0A help='Sets the path containing the DNG images. Default is the current ' + %5C%0A 'working directory, which is: %25(default)s')%0A return parser, parser.parse_args()%0A%0A%0Adef processFiles(fname):%0A subprocess.check_call(%5B'ufraw-batch', '--wb=auto', '--overwrite',%0A '--size=2048', '--out-type=jpeg', fname%5D)%0A subprocess.check_call(%5B'exiftool', '-overwrite_original', '-q', '-x', 'Orientation',%0A '-TagsFromFile', fname, fname.replace('.DNG', '.jpg')%5D)%0A%0A%0Adef workingProgramCheck(prog):%0A '''Checks whether the program is accessible on the system.'''%0A try:%0A subprocess.check_call(%5B'which', '-s', prog%5D)%0A except Exception:%0A raise Exception(prog + ' is not accessible on the system.')%0A%0A%0Adef main():%0A parser, args = parseArgs()%0A%0A # Check whether ufraw and exiftool are working properly.%0A workingProgramCheck('ufraw-batch')%0A workingProgramCheck('exiftool')%0A%0A pool = mp.Pool(mp.cpu_count())%0A%0A for fname in glob.glob(os.path.normpath(os.path.join(args.imagesPath, '*.DNG'))):%0A pool.apply_async(processFiles, %5Bfname%5D)%0A%0A pool.close()%0A pool.join()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
6aaa08a48dade981de18b117363357fdffaeb641
add python_capstone_setup.py
suite/python_capstone_setup.py
suite/python_capstone_setup.py
Python
0.000012
@@ -0,0 +1,127 @@ +#!/bin/sh%0A# this prints out Capstone setup & core+Python-binding versions%0A%0Apython -c %22import capstone; print capstone.debug()%22%0A
077cf46ab42c76bf3a854142a4f530625a377837
Create tutorial2.py
tutorial2.py
tutorial2.py
Python
0
@@ -0,0 +1 @@ +%0A
1bb1ececfcd548d52a28b713f4ee7eb4e710da85
Add an example of using fchollet multi_gpu_model on InceptionV3.
keras_tf_multigpu/examples/fchollet_inception3_multigpu.py
keras_tf_multigpu/examples/fchollet_inception3_multigpu.py
Python
0
@@ -0,0 +1,1078 @@ +import tensorflow as tf%0Afrom keras.applications import InceptionV3%0Afrom keras.utils import multi_gpu_model%0Aimport numpy as np%0A%0Anum_samples = 1000%0Aheight = 224%0Awidth = 224%0Anum_classes = 1000%0A%0Agpu_count = 2%0A%0A# Instantiate the base model%0A# (here, we do it on CPU, which is optional).%0Awith tf.device('/cpu:0' if gpu_count %3E 1 else '/gpu:0'):%0A model = InceptionV3(weights=None,%0A input_shape=(height, width, 3),%0A classes=num_classes)%0A%0A# Replicates the model on N GPUs.%0A# This assumes that your machine has N available GPUs.%0Aif gpu_count %3E 1:%0A parallel_model = multi_gpu_model(model, gpus=gpu_count)%0Aelse:%0A parallel_model = model%0Aparallel_model.compile(loss='categorical_crossentropy',%0A optimizer='rmsprop')%0A%0A# Generate dummy data.%0Ax = np.random.random((num_samples, height, width, 3))%0Ay = np.random.random((num_samples, num_classes))%0A%0A# This %60fit%60 call will be distributed on N GPUs.%0A# Since the batch size is N*32, each GPU will process 32 samples.%0Aparallel_model.fit(x, y, epochs=20, batch_size=32 * gpu_count)%0A
9ffafa9c11e71c176adb4056fbc780e450cc0d82
Add experimental queries module.
databroker/queries.py
databroker/queries.py
Python
0
@@ -0,0 +1,1192 @@ +%22%22%22%0AThis module is experimental.%0A%22%22%22%0Aimport collections.abc%0Aimport abc%0A%0A%0Aclass Query(collections.abc.Mapping):%0A %22%22%22%0A This represents a MongoDB query.%0A %0A MongoDB queries are typically encoded as simple dicts. This object supports%0A the dict interface in a read-only fashion. Subclassses add a nice __repr__%0A and mutable attributes from which the contents of the dict are derived.%0A %22%22%22%0A @abc.abstractproperty%0A def _query(self):%0A ...%0A%0A def __iter__(self):%0A return iter(self._query)%0A%0A def __getitem__(self, key):%0A return self._query%5Bkey%5D%0A %0A def __len__(self):%0A return len(self._query)%0A%0A%0Aclass TimeRange(Query):%0A %22%22%22%0A A search query representing a time range.%0A %22%22%22%0A def __init__(self, since=None, until=None):%0A self.since = since%0A self.until = until%0A%0A @property%0A def _query(self):%0A query = %7B'time': %7B%7D%7D%0A if self.since is not None:%0A query%5B'time'%5D%5B'$gte'%5D = self.since%0A if self.until is not None:%0A query%5B'time'%5D%5B'$lt'%5D = self.until%0A return query%0A%0A def __repr__(self):%0A return f%22%7Btype(self).__name__%7D(since=%7Bself.since%7D, until=%7Bself.until%7D)%22%0A
8c7fc2382db0ec9c901f6c2c2b00971f3ee7c3cc
Add tests for custom authentication backend
logintokens/tests/test_backends.py
logintokens/tests/test_backends.py
Python
0
@@ -0,0 +1,1884 @@ +%22%22%22logintokens app unittests for backends%0A%0A%22%22%22%0Afrom time import sleep%0A%0Afrom django.test import TestCase, Client%0Afrom django.contrib.auth import get_user_model, authenticate%0A%0Afrom logintokens.tokens import default_token_generator%0A%0A%0AUSER = get_user_model()%0A%0A%0Aclass EmailOnlyAuthenticationBackendTest(TestCase):%0A %22%22%22Tests for email only authentication backend%0A%0A %22%22%22%0A def setUp(self):%0A self.client = Client()%0A self.generator = default_token_generator%0A self.new_username = 'newvisitor'%0A self.existing_user = USER._default_manager.create_user('existinguser')%0A%0A def test_different_tokens_usable(self):%0A %22%22%22Two differing tokens should both be usabe to authenticate.%0A%0A %22%22%22%0A username = self.existing_user.get_username()%0A token1 = self.generator.make_token(username)%0A sleep(1)%0A token2 = self.generator.make_token(username)%0A%0A self.assertNotEqual(token1, token2)%0A self.assertEqual(authenticate(token=token1), self.existing_user)%0A self.assertEqual(authenticate(token=token2), self.existing_user)%0A%0A def test_login_invalidates_tokens(self):%0A %22%22%22Tokens generated before a successful login should become invalid.%0A%0A %22%22%22%0A username = self.existing_user.get_username()%0A token1 = self.generator.make_token(username)%0A sleep(1)%0A token2 = self.generator.make_token(username)%0A%0A self.assertNotEqual(token1, token2)%0A%0A self.client.force_login(self.existing_user)%0A%0A self.assertIsNone(authenticate(token=token1))%0A self.assertIsNone(authenticate(token=token2))%0A%0A def test_new_visitor_creates_user(self):%0A %22%22%22Using a token from a new visitor should create their user object.%0A%0A %22%22%22%0A token = self.generator.make_token(self.new_username)%0A user = authenticate(token=token)%0A self.assertIsInstance(user, USER)%0A
f746c2a8a59342060d404944a586b11e1f46df5a
Merge with lp:openobject-addons
addons/product_visible_discount/__openerp__.py
addons/product_visible_discount/__openerp__.py
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
    'name': 'Prices Visible Discounts',
    'version': '1.0',
    'author': 'OpenERP SA',
    'category': 'Sales Management',
    'description': """
This module lets you calculate discounts on Sale Order lines and Invoice lines base on the partner's pricelist.
===============================================================================================================

To this end, a new check box named 'Visible Discount' is added to the pricelist form.

**Example:**
    For the product PC1 and the partner "Asustek": if listprice=450, and the price
    calculated using Asustek's pricelist is 225. If the check box is checked, we
    will have on the sale order line: Unit price=450, Discount=50,00, Net price=225.
    If the check box is unchecked, we will have on Sale Order and Invoice lines:
    Unit price=225, Discount=0,00, Net price=225.
    """,
    "depends": ["sale","purchase"],
    "demo_xml": [],
    "update_xml": ['product_visible_discount_view.xml'],
    "auto_install": False,
    "installable": True,
    "certificate" : "001144718884654279901",
    'images': ['images/pricelists_visible_discount.jpeg'],
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Python
0
@@ -1820,17 +1820,17 @@ -%22 +' depends -%22 +' : %5B%22 @@ -1856,18 +1856,14 @@ -%22 +' demo -_xml%22 +' : %5B%5D @@ -1872,20 +1872,14 @@ -%22update_xml%22 +'data' : %5B' @@ -1919,17 +1919,17 @@ '%5D,%0A -%22 +' auto_ins @@ -1932,17 +1932,17 @@ _install -%22 +' : False, @@ -1946,17 +1946,17 @@ se,%0A -%22 +' installa @@ -1958,17 +1958,17 @@ tallable -%22 +' : True,%0A @@ -1971,17 +1971,17 @@ ue,%0A -%22 +' certific @@ -1983,17 +1983,17 @@ tificate -%22 +' : %220011
54ca48a2b8cbd53cd6506fdbce47d16f03a28a7d
Add unit tests for bubble sort
tests/test_sorting_and_searching/test_bubble_sort.py
tests/test_sorting_and_searching/test_bubble_sort.py
Python
0
@@ -0,0 +1,446 @@ +import unittest%0A%0Afrom aids.sorting_and_searching.bubble_sort import bubble_sort%0A%0Aclass BubbleSortTestCase(unittest.TestCase):%0A '''%0A Unit tests for bubble sort%0A%0A '''%0A%0A def setUp(self):%0A self.example_1 = %5B2, 5, 4, 3, 1%5D%0A%0A def test_bubble_sort(self):%0A bubble_sort(self.example_1)%0A self.assertEqual(self.example_1,%5B1,2,3,4,5%5D)%0A%0A%0A def tearDown(self):%0A pass%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
38e75951570be46f6a36eeb000a4621bc76bf02a
Move history plugin to learn phase.
flexget/plugins/output/history.py
flexget/plugins/output/history.py
from __future__ import unicode_literals, division, absolute_import

import logging
from datetime import datetime

from sqlalchemy import Column, String, Integer, DateTime, Unicode, desc

from flexget import options, plugin
from flexget.event import event
from flexget.manager import Base, Session
from flexget.utils.tools import console

log = logging.getLogger('history')


class History(Base):
    __tablename__ = 'history'

    id = Column(Integer, primary_key=True)
    task = Column('feed', String)
    filename = Column(String)
    url = Column(String)
    title = Column(Unicode)
    time = Column(DateTime)
    details = Column(String)

    def __init__(self):
        self.time = datetime.now()

    def __str__(self):
        return '<History(filename=%s,task=%s)>' % (self.filename, self.task)


class PluginHistory(object):
    """Records all accepted entries for later lookup"""

    schema = {'type': 'boolean'}

    @plugin.priority(-255)
    def on_task_output(self, task, config):
        """Add accepted entries to history"""
        if config is False:
            return  # Explicitly disabled with configuration
        for entry in task.accepted:
            item = History()
            item.task = task.name
            item.filename = entry.get('output', None)
            item.title = entry['title']
            item.url = entry['url']
            reason = ''
            if 'reason' in entry:
                reason = ' (reason: %s)' % entry['reason']
            item.details = 'Accepted by %s%s' % (entry.get('accepted_by', '<unknown>'), reason)
            task.session.add(item)


def do_cli(manager, options):
    session = Session()
    try:
        console('-- History: ' + '-' * 67)
        query = session.query(History)
        if options.search:
            search_term = options.search.replace(' ', '%').replace('.', '%')
            query = query.filter(History.title.like('%' + search_term + '%'))
        query = query.order_by(desc(History.time)).limit(options.limit)
        for item in reversed(query.all()):
            console(' Task : %s' % item.task)
            console(' Title : %s' % item.title)
            console(' Url : %s' % item.url)
            if item.filename:
                console(' Stored : %s' % item.filename)
            console(' Time : %s' % item.time.strftime("%c"))
            console(' Details : %s' % item.details)
            console('-' * 79)
    finally:
        session.close()


@event('options.register')
def register_parser_arguments():
    parser = options.register_command('history', do_cli, help='view the history of entries that FlexGet has accepted')
    parser.add_argument('--limit', action='store', type=int, metavar='NUM', default=50,
                        help='limit to %(metavar)s results')
    parser.add_argument('--search', action='store', metavar='TERM',
                        help='limit to results that contain %(metavar)s')


@event('plugin.register')
def register_plugin():
    plugin.register(PluginHistory, 'history', builtin=True, api_ver=2)
Python
0
@@ -924,35 +924,8 @@ '%7D%0A%0A - @plugin.priority(-255)%0A @@ -936,22 +936,21 @@ on_task_ -output +learn (self, t
70e1910ef01c6313360dff3f3e728e4f5f404f38
Allow history to be filtered by task
flexget/plugins/output/history.py
flexget/plugins/output/history.py
from __future__ import unicode_literals, division, absolute_import

import logging
from datetime import datetime

from sqlalchemy import Column, String, Integer, DateTime, Unicode, desc

from flexget import options, plugin
from flexget.event import event
from flexget.logger import console
from flexget.manager import Base, Session

log = logging.getLogger('history')


class History(Base):
    __tablename__ = 'history'

    id = Column(Integer, primary_key=True)
    task = Column('feed', String)
    filename = Column(String)
    url = Column(String)
    title = Column(Unicode)
    time = Column(DateTime)
    details = Column(String)

    def __init__(self):
        self.time = datetime.now()

    def __str__(self):
        return '<History(filename=%s,task=%s)>' % (self.filename, self.task)


class PluginHistory(object):
    """Records all accepted entries for later lookup"""

    schema = {'type': 'boolean'}

    def on_task_learn(self, task, config):
        """Add accepted entries to history"""
        if config is False:
            return  # Explicitly disabled with configuration
        for entry in task.accepted:
            item = History()
            item.task = task.name
            item.filename = entry.get('output', None)
            item.title = entry['title']
            item.url = entry['url']
            reason = ''
            if 'reason' in entry:
                reason = ' (reason: %s)' % entry['reason']
            item.details = 'Accepted by %s%s' % (entry.get('accepted_by', '<unknown>'), reason)
            task.session.add(item)


def do_cli(manager, options):
    session = Session()
    try:
        console('-- History: ' + '-' * 67)
        query = session.query(History)
        if options.search:
            search_term = options.search.replace(' ', '%').replace('.', '%')
            query = query.filter(History.title.like('%' + search_term + '%'))
        query = query.order_by(desc(History.time)).limit(options.limit)
        for item in reversed(query.all()):
            console(' Task : %s' % item.task)
            console(' Title : %s' % item.title)
            console(' Url : %s' % item.url)
            if item.filename:
                console(' Stored : %s' % item.filename)
            console(' Time : %s' % item.time.strftime("%c"))
            console(' Details : %s' % item.details)
            console('-' * 79)
    finally:
        session.close()


@event('options.register')
def register_parser_arguments():
    parser = options.register_command('history', do_cli, help='view the history of entries that FlexGet has accepted')
    parser.add_argument('--limit', action='store', type=int, metavar='NUM', default=50,
                        help='limit to %(metavar)s results')
    parser.add_argument('--search', action='store', metavar='TERM',
                        help='limit to results that contain %(metavar)s')


@event('plugin.register')
def register_plugin():
    plugin.register(PluginHistory, 'history', builtin=True, api_ver=2)
Python
0.000008
@@ -1900,16 +1900,119 @@ + '%25'))%0A + if options.task:%0A query = query.filter(History.task.like('%25' + options.task + '%25'))%0A @@ -2983,16 +2983,131 @@ var)s')%0A + parser.add_argument('--task', action='store', metavar='TASK', help='limit to results in specified %25(metavar)s') %0A%0A@event
0b047f5b6123d851916ed12114512ddebec58225
Add 20150509 question.
LeetCode/add_two_numbers.py
LeetCode/add_two_numbers.py
Python
0.000001
@@ -0,0 +1,1848 @@ +%22%22%22%0AYou are given two linked lists representing two non-negative numbers.%0AThe digits are stored in reverse order and each of their nodes contain a single%0Adigit. Add the two numbers and return it as a linked list.%0A%0AInput: (2 -%3E 4 -%3E 3) + (5 -%3E 6 -%3E 4)%0AOutput: 7 -%3E 0 -%3E 8%0A%0ANotice: extra digit.%0A%22%22%22%0A%0A%0Aclass ListNode:%0A %22%22%22%0A Definition for singly-linked list.%0A %22%22%22%0A def __init__(self, x):%0A self.val = x%0A self.next = None%0A%0A%0Aclass Solution:%0A # @param %7BListNode%7D l1%0A # @param %7BListNode%7D l2%0A # @return %7BListNode%7D%0A def addTwoNumbers(self, l1, l2):%0A new_root = None%0A%0A temp_l1, temp_l2 = l1, l2%0A temp = new_root%0A extra_digit = 0%0A while None not in %5Btemp_l1, temp_l2%5D:%0A value = temp_l1.val + temp_l2.val + extra_digit%0A if temp is None:%0A temp = ListNode(value)%0A new_root = temp%0A else:%0A new_node = ListNode(value)%0A temp.next = new_node%0A temp = new_node%0A if temp.val %3E= 10:%0A temp.val -= 10%0A extra_digit = 1%0A else:%0A extra_digit = 0%0A temp_l1 = temp_l1.next%0A temp_l2 = temp_l2.next%0A%0A continue_temp = temp_l1 if temp_l1 is not None else temp_l2%0A while continue_temp is not None:%0A value = continue_temp.val + extra_digit%0A new_node = ListNode(value)%0A temp.next = new_node%0A temp = new_node%0A if temp.val %3E= 10:%0A temp.val -= 10%0A extra_digit = 1%0A else:%0A extra_digit = 0%0A continue_temp = continue_temp.next%0A%0A if extra_digit %3E= 1:%0A new_node = ListNode(extra_digit)%0A temp.next = new_node%0A temp = new_node%0A%0A return new_root%0A
e46da8f316485c7c9e11ffe751108539f9254a68
Create ClientUDP.py
ClientUDP.py
ClientUDP.py
Python
0
@@ -0,0 +1,2738 @@ +'''%0ACreated on 12 Feb 2015%0A%0A@author: shutebt01%0A'''%0A#!/bin/env/python3%0A'''%0APacket formating:%0A %5Btype, src-name, src-group, data%5D%0A'''%0A%0Aimport socket, threading, json%0A%0Aname = input(%22Enter User Name: %22)%0Aport = 16500%0A#host = input(%22Enter host: %22)%0Aroom = %22Global%22%0A%0Ashowall = False%0A%0As = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)%0As.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) %0As.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1) %0As.bind(('', port)) %0A#s.connect((host, port))%0A%0Aclass InputThread(threading.Thread):%0A def __init__(self):%0A threading.Thread.__init__(self, target=self.input, name=%22Thread-Input%22)%0A %0A def input(self):%0A global room%0A while True:%0A inp = input()%0A data = None%0A if not(inp.startswith('!')):%0A #assumes its a message if not a command%0A data = json.dumps(%5B%22Message%22, name, room, inp%5D)%0A else:%0A # Creates initial packet with data for tracking%0A packet = %5B%22Event%22, name, room%5D%0A split = inp.split(' ', 1)%0A if split%5B0%5D == %22!pm%22:%0A pmsplit = split%5B1%5D.split(' ', 1)%0A #TODO implement better validation%0A if (len(split) == 2):%0A #Adds data to packet%0A packet.append(%22pm%22)%0A packet.append(pmsplit%5B0%5D)%0A packet.append(pmsplit%5B1%5D)%0A data = json.dumps(packet)%0A if split%5B0%5D == %22!room%22:%0A room = split%5B1%5D%0A print(%22You changed to room:%22 + room)%0A if split%5B0%5D == %22!broadcast%22 or split%5B0%5D == %22!bcast%22:%0A msg = split%5B1%5D%0A packet.append(%22bcast%22)%0A packet.append(msg)%0A data = json.dumps(packet)%0A if data:%0A s.sendto(data.encode(%22ascii%22), (%22%3Cbroadcast%3E%22, port))%0A%0Aclass OutputThread(threading.Thread):%0A def __init__(self):%0A threading.Thread.__init__(self, target=self.output, name=%22Thread-Output%22)%0A %0A def output(self):%0A while True:%0A data = s.recv(2048).decode(%22ascii%22)%0A array = json.loads(data)%0A if array%5B0%5D == %22Message%22:%0A if array%5B2%5D == room:%0A print(array%5B1%5D + %22 (%22 + array%5B2%5D + %22):%22 + array%5B3%5D)%0A elif array%5B0%5D == %22Event%22:%0A if array%5B3%5D == %22pm%22 and array%5B4%5D == name:%0A print(array%5B1%5D + %22 (%22 + array%5B2%5D + %22) -%3E You: %22 + array%5B5%5D)%0A elif array%5B3%5D == %22bcast%22:%0A print(array%5B1%5D + %22 (%22 + %22*%22 + %22):%22 + array%5B4%5D)%0A%0AInp = InputThread()%0AInp.start()%0AOut = OutputThread()%0AOut.start()%0A
bf4b4ae886d8b631c443d1d992159f1922232dca
Create code.py
Code/code.py
Code/code.py
Python
0.000001
@@ -0,0 +1,657 @@ +from django.db import models%0A%0Aclass Address(models.Model):%0A address=models.CharField(max_length=255,blank=True)%0A city=models.CharField(max_length=150,blank=True)%0A state=models.CharField(max_length=2,blank=True) // Such as US for Unitet States of America, IN for India%0A pin=models.CharField(max_length=15,blank=True)%0A %0Aclass Contact(models.Model):%0A first_name=models.CharField(max_length=255,blank=True)%0A last_name=models.CharField(max_length=255,blank=True)%0A email=models.EmailField(blank=True)%0A phone=models.CharField(max_length=150,blank=True)%0A birthdate=models.CharField(auto_now_add=True)%0A %0A address=models.ForeignKey(Address,null=True)%0A
67596d081059a004e5f7ab15f7972773fdf2f15e
ADD PyGrid SetupService message tests
tests/syft/grid/messages/setup_msg_test.py
tests/syft/grid/messages/setup_msg_test.py
Python
0
@@ -0,0 +1,2964 @@ +# syft absolute%0Aimport syft as sy%0Afrom syft.core.io.address import Address%0Afrom syft.grid.messages.setup_messages import CreateInitialSetUpMessage%0Afrom syft.grid.messages.setup_messages import CreateInitialSetUpResponse%0Afrom syft.grid.messages.setup_messages import GetSetUpMessage%0Afrom syft.grid.messages.setup_messages import GetSetUpResponse%0A%0A%0Adef test_create_initial_setup_message_serde() -%3E None:%0A bob_vm = sy.VirtualMachine(name=%22Bob%22)%0A target = Address(name=%22Alice%22)%0A%0A request_content = %7B %0A %22settings%22: %7B %0A %22cloud-admin-token%22 : %22d84we35ad3a1d59a84sd9%22,%0A %22cloud-credentials%22: %22%3Ccloud-credentials.pem%3E%22,%0A %22infra%22: %7B%0A %22autoscaling%22: True,%0A %22triggers%22: %7B%0A %22memory%22: %2250%22,%0A %22vCPU%22: %2280%22%0A %7D%0A %7D,%0A %7D%0A %7D%0A msg = CreateInitialSetUpMessage(%0A address=target,%0A content= request_content,%0A reply_to=bob_vm.address,%0A )%0A%0A blob = msg.serialize()%0A msg2 = sy.deserialize(blob=blob)%0A%0A assert msg.id == msg2.id%0A assert msg.address == target%0A assert msg.content == msg2.content%0A assert msg == msg2%0A%0Adef test_create_initial_setup_response_serde() -%3E None:%0A target = Address(name=%22Alice%22)%0A%0A request_content = %7B%22msg%22: %22Initial setup registered successfully!%22%7D%0A msg = CreateInitialSetUpResponse(%0A address=target,%0A success=True,%0A content= request_content,%0A )%0A%0A blob = msg.serialize()%0A msg2 = sy.deserialize(blob=blob)%0A%0A assert msg.id == msg2.id%0A assert msg.address == target%0A assert msg.content == msg2.content%0A assert msg == msg2%0A%0Adef test_get_initial_setup_message_serde() -%3E None:%0A bob_vm = sy.VirtualMachine(name=%22Bob%22)%0A target = Address(name=%22Alice%22)%0A%0A request_content = %7B%7D%0A msg = GetSetUpMessage(%0A address=target,%0A content=request_content,%0A reply_to=bob_vm.address,%0A )%0A%0A blob = msg.serialize()%0A msg2 = sy.deserialize(blob=blob)%0A%0A assert msg.id == msg2.id%0A assert msg.address == target%0A assert msg.content == msg2.content%0A assert msg == msg2%0A%0Adef test_delete_worker_response_serde() -%3E None:%0A target = Address(name=%22Alice%22)%0A%0A content = %7B %0A %22settings%22: %7B %0A %22cloud-admin-token%22 : %22d84we35ad3a1d59a84sd9%22,%0A %22cloud-credentials%22: %22%3Ccloud-credentials.pem%3E%22,%0A %22infra%22: %7B%0A %22autoscaling%22: True,%0A %22triggers%22: %7B%0A %22memory%22: %2250%22,%0A %22vCPU%22: %2280%22%0A %7D%0A %7D,%0A %7D%0A %7D%0A msg = GetSetUpResponse(%0A success=True,%0A address=target,%0A content=content,%0A )%0A%0A blob = msg.serialize()%0A msg2 = sy.deserialize(blob=blob)%0A%0A assert msg.id == msg2.id%0A assert msg.address == target%0A assert msg.content == msg2.content%0A assert msg == msg2
08b52fab8c561834996bd23a5a7654bfac7ea75e
Fix MR comments (NC-1168)
nodeconductor/cost_tracking/filters.py
nodeconductor/cost_tracking/filters.py
from __future__ import unicode_literals

from django.contrib.contenttypes.models import ContentType
from django.db.models import Q
import django_filters
from rest_framework import filters

from nodeconductor.core import filters as core_filters
from nodeconductor.cost_tracking import models, serializers
from nodeconductor.structure import models as structure_models, SupportedServices


class PriceEstimateFilter(django_filters.FilterSet):
    is_manually_input = django_filters.BooleanFilter()

    class Meta:
        model = models.PriceEstimate
        fields = [
            'is_manually_input',
        ]


class PriceEstimateScopeFilterBackend(core_filters.GenericKeyFilterBackend):

    def get_related_models(self):
        return models.PriceEstimate.get_estimated_models()

    def get_field_name(self):
        return 'scope'


class AdditionalPriceEstimateFilterBackend(filters.BaseFilterBackend):

    def filter_queryset(self, request, queryset, view):
        if 'date' in request.query_params:
            date_serializer = serializers.PriceEstimateDateFilterSerializer(
                data={'date_list': request.query_params.getlist('date')})
            date_serializer.is_valid(raise_exception=True)
            query = Q()
            for year, month in date_serializer.validated_data['date_list']:
                query |= Q(year=year, month=month)
            queryset = queryset.filter(query)

        # Filter by date range
        date_range_serializer = serializers.PriceEstimateDateRangeFilterSerializer(data=request.query_params)
        date_range_serializer.is_valid(raise_exception=True)
        if 'start' in date_range_serializer.validated_data:
            year, month = date_range_serializer.validated_data['start']
            queryset = queryset.filter(Q(year__gt=year) | Q(year=year, month__gte=month))
        if 'end' in date_range_serializer.validated_data:
            year, month = date_range_serializer.validated_data['end']
            queryset = queryset.filter(Q(year__lt=year) | Q(year=year, month__lte=month))

        # Filter by customer
        if 'customer' in request.query_params:
            customer_uuid = request.query_params['customer']
            qs = Q()
            for model in models.PriceEstimate.get_estimated_models():
                content_type = ContentType.objects.get_for_model(model)
                if model == structure_models.Customer:
                    query = {'uuid': customer_uuid}
                else:
                    query = {model.Permissions.customer_path + '__uuid': customer_uuid}
                ids = model.objects.filter(**query).values_list('pk', flat=True)
                qs |= Q(content_type=content_type, object_id__in=ids)
            queryset = queryset.filter(qs)

        return queryset


class PriceListItemServiceFilterBackend(core_filters.GenericKeyFilterBackend):

    def get_related_models(self):
        return structure_models.Service.get_all_models()

    def get_field_name(self):
        return 'service'


class DefaultPriceListItemFilter(django_filters.FilterSet):
    resource_type = core_filters.ContentTypeFilter(
        name='resource_content_type', models=SupportedServices.get_resource_models().values())

    class Meta:
        model = models.DefaultPriceListItem
        fields = [
            'key',
            'item_type',
            'resource_type',
        ]
Python
0
@@ -379,16 +379,68 @@ ervices%0A +from nodeconductor.structure.models import Resource%0A %0A%0Aclass @@ -3245,53 +3245,30 @@ els= -SupportedServi +Resour ce -s .get_ -resource_models().value +all_model s())
cb707785cb165e8570aa8201d2e71b5ed2c2f3bd
Use new rebot model
utest/webcontent/spec/data/create_jsdata_for_specs.py
utest/webcontent/spec/data/create_jsdata_for_specs.py
#!/usr/bin/env python

import fileinput
from os.path import join, dirname, abspath
import sys
import os

BASEDIR = dirname(abspath(__file__))
OUTPUT = join(BASEDIR, 'output.xml')
sys.path.insert(0, join(BASEDIR, '..', '..', '..', '..', 'src'))

import robot
from robot.reporting.outputparser import OutputParser
from robot.reporting.jsondatamodel import SeparatingWriter


def run_robot(testdata, loglevel='INFO'):
    robot.run(testdata, log='NONE', report='NONE',
              tagstatlink=['force:http://google.com:<kuukkeli&gt;',
                           'i*:http://%1/:Title of i%1'],
              tagdoc=['test:this_is_*my_bold*_test',
                      'IX:*Combined* & escaped <&lt; tag doc'],
              tagstatcombine=['fooANDi*:zap', 'i?:IX'],
              critical=[], noncritical=[], outputdir=BASEDIR, loglevel=loglevel)


def create_jsdata(outxml, target, split_log):
    model = OutputParser(split_log=split_log).parse(outxml)
    model.set_settings({'logURL': 'log.html',
                        'reportURL': 'report.html',
                        'background': {'fail': 'DeepPink'}})
    with open(target, 'w') as output:
        model.write_to(output)
        for index, (keywords, strings) in enumerate(model._split_results):
            writer = SeparatingWriter(output, '')
            writer.dump_json('window.outputKeywords%d = ' % index, keywords)
            writer.dump_json('window.outputStrings%d = ' % index, strings)


def replace_all(file,searchExp,replaceExp):
    for line in fileinput.input(file, inplace=1):
        if searchExp in line:
            line = line.replace(searchExp,replaceExp)
        sys.stdout.write(line)


def create(input, target, targetName, loglevel='INFO', split_log=False):
    input = join(BASEDIR, input)
    target = join(BASEDIR, target)
    run_robot(input, loglevel)
    create_jsdata(OUTPUT, target, split_log)
    replace_all(target, 'window.output', 'window.' + targetName)


if __name__ == '__main__':
    create('Suite.txt', 'Suite.js', 'suiteOutput')
    create('SetupsAndTeardowns.txt', 'SetupsAndTeardowns.js', 'setupsAndTeardownsOutput')
    create('Messages.txt', 'Messages.js', 'messagesOutput')
    create('teardownFailure', 'TeardownFailure.js', 'teardownFailureOutput')
    create(join('teardownFailure', 'PassingFailing.txt'), 'PassingFailing.js', 'passingFailingOutput')
    create('TestsAndKeywords.txt', 'TestsAndKeywords.js', 'testsAndKeywordsOutput')
    create('.', 'allData.js', 'allDataOutput')
    create('.', 'splitting.js', 'splittingOutput', split_log=True)
    os.remove(OUTPUT)
Python
0
@@ -97,16 +97,69 @@ port os%0A +from robot.result.datamodel import DatamodelVisitor%0A%0A %0ABASEDIR @@ -417,16 +417,33 @@ ngWriter +, DataModelWriter %0A%0A%0Adef r @@ -966,62 +966,198 @@ -model = OutputParser(split_log=split_log).parse(outxml +result = robot.result.builders.ResultFromXML(outxml)%0A visitor = DatamodelVisitor(result, split_log=split_log)%0A model = DataModelWriter(visitor.datamodel, visitor._context.split_results )%0A
3608c4d3b559ba7fa1bd9629231e98196681caa4
add package py-gdbgui (#7715)
var/spack/repos/builtin/packages/py-gdbgui/package.py
var/spack/repos/builtin/packages/py-gdbgui/package.py
Python
0
@@ -0,0 +1,2059 @@ +##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass PyGdbgui(PythonPackage):%0A %22%22%22gdbgui is a modern, free, browser-based frontend to gdb%22%22%22%0A%0A homepage = %22https://gdbgui.com%22%0A url = %22https://pypi.io/packages/source/g/gdbgui/gdbgui-0.11.2.1.tar.gz%22%0A%0A version('0.11.2.1', 'c15dd3f60fe372b2e93f705c7ee75f51')%0A%0A depends_on('py-setuptools', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('gdb', type='run')%0A
8355cb358d14589a194926d37beeb5af7af2a591
Increase event image url limit from 200
falmer/events/migrations/0012_auto_20170905_1208.py
falmer/events/migrations/0012_auto_20170905_1208.py
Python
0.000114
@@ -0,0 +1,609 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.3 on 2017-09-05 11:08%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('events', '0011_auto_20170905_1028'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='mslevent',%0A name='image_url',%0A field=models.URLField(max_length=2000),%0A ),%0A migrations.AlterField(%0A model_name='mslevent',%0A name='url',%0A field=models.URLField(max_length=2000),%0A ),%0A %5D%0A
a16b4401f37f08d8cb5e1f9ec1b7d4a3221360ab
Add test case for regular extrusion
test/test_regular_extrusion.py
test/test_regular_extrusion.py
Python
0
@@ -0,0 +1,922 @@ +# -*- coding: utf-8 -*-%0A%22%22%22Creates regular cube mesh by extrusion.%0A%22%22%22%0Aimport pygmsh%0A%0Afrom helpers import compute_volume%0A%0A%0Adef test():%0A x = 5%0A y = 4%0A z = 3%0A x_layers = 10%0A y_layers = 5%0A z_layers = 3%0A geom = pygmsh.built_in.Geometry()%0A p = geom.add_point(%5B0, 0, 0%5D, 1)%0A _, l, _ = geom.extrude(p, %5Bx, 0, 0%5D, num_layers=x_layers)%0A _, s, _ = geom.extrude(l, %5B0, y, 0%5D, num_layers=y_layers)%0A geom.extrude(s, %5B0, 0, z%5D, num_layers=z_layers)%0A points, cells, _, _, _ = pygmsh.generate_mesh(geom)%0A%0A ref_vol = x * y * z%0A assert abs(compute_volume(points, cells) - ref_vol) %3C 1.0e-2 * ref_vol%0A%0A # Each grid-cell from layered extrusion will result in 6 tetrahedrons.%0A ref_tetras = 6 * x_layers * y_layers * z_layers%0A assert len(cells%5B%22tetra%22%5D) == ref_tetras%0A%0A return points, cells%0A%0A%0Aif __name__ == %22__main__%22:%0A import meshio%0A%0A meshio.write_points_cells(%22cube.vtu%22, *test())%0A
5c7b70061d6e96619c6e3a40c87aaec39b408fdf
Enable subscription admin area
subscription/admin.py
subscription/admin.py
Python
0
@@ -0,0 +1,195 @@ +from django.contrib import admin%0Afrom subscription.models import MessageSet, Message, Subscription%0A%0Aadmin.site.register(MessageSet)%0Aadmin.site.register(Message)%0Aadmin.site.register(Subscription)%0A
78552d3de73174dd0ffdf9a58009281013dcf640
make interface consistent for add_sample
rlkit/data_management/replay_buffer.py
rlkit/data_management/replay_buffer.py
import abc


class ReplayBuffer(object, metaclass=abc.ABCMeta):
    """
    A class used to save and replay data.
    """

    @abc.abstractmethod
    def add_sample(self, observation, action, reward, next_observation,
                   terminal, **kwargs):
        """
        Add a transition tuple.
        """
        pass

    @abc.abstractmethod
    def terminate_episode(self):
        """
        Let the replay buffer know that the episode has terminated in case
        some special book-keeping has to happen.
        :return:
        """
        pass

    @abc.abstractmethod
    def num_steps_can_sample(self, **kwargs):
        """
        :return: # of unique items that can be sampled.
        """
        pass

    def add_path(self, path):
        """
        Add a path to the replay buffer.

        This default implementation naively goes through every step, but you
        may want to optimize this.

        NOTE: You should NOT call "terminate_episode" after calling add_path.
        It's assumed that this function handles the episode termination.

        :param path: Dict like one outputted by rlkit.samplers.util.rollout
        """
        for i, (
                obs,
                action,
                reward,
                next_obs,
                terminal,
                agent_info,
                env_info
        ) in enumerate(zip(
            path["observations"],
            path["actions"],
            path["rewards"],
            path["next_observations"],
            path["terminals"],
            path["agent_infos"],
            path["env_infos"],
        )):
            self.add_sample(
                obs,
                action,
                reward,
                next_obs,
                terminal,
                agent_info=agent_info,
                env_info=env_info,
            )
        self.terminate_episode()

    @abc.abstractmethod
    def random_batch(self, batch_size):
        """
        Return a batch of size `batch_size`.
        :param batch_size:
        :return:
        """
        pass
Python
0
@@ -194,32 +194,24 @@ reward, -next_observation +terminal ,%0A @@ -223,24 +223,32 @@ -terminal +next_observation , **kwar
7bd6f3e7751deecfc3cd555fc071d722c856802c
Implement division using built in library function
chips/compiler/builtins.py
chips/compiler/builtins.py
Python
0.000001
@@ -0,0 +1,2288 @@ +#!/usr/bin/env python%0A%22%22%22Support Library for builtin Functionality%22%22%22%0A%0A__author__ = %22Jon Dawson%22%0A__copyright__ = %22Copyright (C) 2013, Jonathan P Dawson%22%0A__version__ = %220.1%22%0A%0Abuiltins=%22%22%22%0A%0Aunsigned unsigned_divide_xxxx(unsigned dividend, unsigned divisor)%7B%0A unsigned denom = divisor;%0A unsigned bit = 1;%0A unsigned quotient = 0;%0A if( denom %3E dividend ) return 0;%0A if( denom == dividend ) return 1;%0A while(denom %3C= dividend)%7B%0A denom %3C%3C= 1;%0A bit %3C%3C= 1;%0A %7D%0A denom %3E%3E= 1;%0A bit %3E%3E= 1;%0A while(bit)%7B%0A if(dividend %3E= denom)%7B%0A dividend -= denom;%0A quotient %7C= bit;%0A %7D%0A bit %3E%3E= 1;%0A denom %3E%3E= 1;%0A %7D%0A return quotient;%0A%7D%0A%0Aint divide_xxxx(int dividend, int divisor)%7B%0A unsigned udividend, udivisor, uquotient;%0A unsigned dividend_sign, divisor_sign, quotient_sign;%0A dividend_sign = dividend & 0x8000u;%0A divisor_sign = divisor & 0x8000u;%0A quotient_sign = dividend_sign %5E divisor_sign;%0A udividend = dividend_sign ? -dividend : dividend;%0A udivisor = divisor_sign ? -divisor : divisor;%0A uquotient = unsigned_divide_xxxx(udividend, udivisor);%0A return quotient_sign ? -uquotient : uquotient;%0A%7D%0A%0Along unsigned long_unsigned_divide_xxxx(long unsigned dividend, long unsigned divisor)%7B%0A long unsigned denom = divisor;%0A long unsigned bit = 1;%0A long unsigned quotient = 0;%0A if( denom %3E dividend ) return 0;%0A if( denom == dividend ) return 1;%0A while(denom %3C= dividend)%7B%0A denom %3C%3C= 1;%0A bit %3C%3C= 1;%0A %7D%0A denom %3E%3E= 1;%0A bit %3E%3E= 1;%0A while(bit)%7B%0A if(dividend %3E= denom)%7B%0A dividend -= denom;%0A quotient %7C= bit;%0A %7D%0A bit %3E%3E= 1;%0A denom %3E%3E= 1;%0A %7D%0A return quotient;%0A%7D%0A%0Along int long_divide_xxxx(long int dividend, long int divisor)%7B%0A long unsigned udividend, udivisor, uquotient;%0A long unsigned dividend_sign, divisor_sign, quotient_sign;%0A dividend_sign = dividend & 0x80000000ul;%0A divisor_sign = divisor & 0x80000000ul;%0A quotient_sign = dividend_sign %5E divisor_sign;%0A udividend = dividend_sign ? -dividend : dividend;%0A udivisor = divisor_sign ? -divisor : divisor;%0A uquotient = long_unsigned_divide_xxxx(udividend, udivisor);%0A return quotient_sign ? -uquotient : uquotient;%0A%7D%0A%0A%22%22%22%0A
6eb49ae8dcf33a7d7be9ed3c8208bc83a9a46757
Create python_wiki_one.py
ckOLDab/python_wiki_one.py
ckOLDab/python_wiki_one.py
Python
0.999848
@@ -0,0 +1,1222 @@ +import time%0Aimport BaseHTTPServer%0A%0A%0AHOST_NAME = '127.0.0.1' # !!!REMEMBER TO CHANGE THIS!!!%0APORT_NUMBER = 80 # Maybe set this to 9000.%0A%0A%0Aclass MyHandler(BaseHTTPServer.BaseHTTPRequestHandler):%0A def do_HEAD(s):%0A s.send_response(200)%0A s.send_header(%22Content-type%22, %22text/html%22)%0A s.end_headers()%0A def do_GET(s):%0A %22%22%22Respond to a GET request.%22%22%22%0A s.send_response(200)%0A s.send_header(%22Content-type%22, %22text/html%22)%0A s.end_headers()%0A s.wfile.write(%22%3Chtml%3E%3Chead%3E%3Ctitle%3ETitle goes here.%3C/title%3E%3C/head%3E%22)%0A s.wfile.write(%22%3Cbody%3E%3Cp%3EThis is a test.%3C/p%3E%22)%0A # If someone went to %22http://something.somewhere.net/foo/bar/%22,%0A # then s.path equals %22/foo/bar/%22.%0A s.wfile.write(%22%3Cp%3EYou accessed path: %25s%3C/p%3E%22 %25 s.path)%0A s.wfile.write(%22%3C/body%3E%3C/html%3E%22)%0A%0Aif __name__ == '__main__':%0A server_class = BaseHTTPServer.HTTPServer%0A httpd = server_class((HOST_NAME, PORT_NUMBER), MyHandler)%0A print time.asctime(), %22Server Starts - %25s:%25s%22 %25 (HOST_NAME, PORT_NUMBER)%0A try:%0A httpd.serve_forever()%0A except KeyboardInterrupt:%0A pass%0A httpd.server_close()%0A print time.asctime(), %22Server Stops - %25s:%25s%22 %25 (HOST_NAME, PORT_NUMBER)%0A
93cd3dc8c4d37e2e83f6f6e09273576297e822b9
Version 0.3.0 - alpha
stab/__init__.py
stab/__init__.py
Python
0.000001
@@ -0,0 +1,45 @@ +__version__ = '0.3.0'%0A%0Afrom stab import stab%0A
53cdd6e7bcb37567382f3b3688b6a55f1b2968be
Add test_binaryclassifier
tests/test_binaryclassifier.py
tests/test_binaryclassifier.py
Python
0.000015
@@ -0,0 +1,898 @@ +%0Aimport numpy as np%0Afrom sklearn import svm, datasets%0A%0Afrom darwin.pipeline import ClassificationPipeline%0A%0A%0Adef test_binary_classification_with_classification_pipeline():%0A # generate the dataset%0A n_samples=100%0A n_features=20%0A x, y = datasets.make_gaussian_quantiles(mean=None, cov=1.0, n_samples=n_samples,%0A n_features=n_features, n_classes=2,%0A shuffle=True, random_state=1)%0A%0A # another way to generate the data%0A # x, y = datasets.make_hastie_10_2(n_samples=10, random_state=1)%0A%0A # -- test with darwin%0A classifier_name='linsvm'%0A cvmethod='10'%0A n_feats = x.shape%5B1%5D%0A%0A pipe = ClassificationPipeline(n_feats=n_feats, clfmethod=classifier_name,%0A cvmethod=cvmethod)%0A%0A results, metrics = pipe.cross_validation(x, y)%0A%0A assert(results is not None)%0A
bb80025f3ed8169a2558e9c5c6bc4db5a862d7ae
Integrate LLVM at llvm/llvm-project@529a3d87a799
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "6139626d738fd03b968e07698f5cd26924e3cd65" LLVM_SHA256 = "b9581ac44a7d9dd3a8470497cddb63c387ab6520d82d077fb609bc29b4e7b887" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], build_file = "//third_party/llvm:llvm.BUILD", patch_file = [ "//third_party/llvm:infer_type.patch", # TODO(b/231285230): remove once resolved "//third_party/llvm:build.patch", "//third_party/llvm:macos_build_fix.patch", ], link_files = {"//third_party/llvm:run_lit.sh": "mlir/run_lit.sh"}, )
Python
0.000001
@@ -160,133 +160,133 @@ = %22 -6139626d738fd03b968e07698f5cd26924e3cd65%22%0A LLVM_SHA256 = %22b9581ac44a7d9dd3a8470497cddb63c387ab6520d82d077fb609bc29b4e7b887 +529a3d87a799a2cba29bc1d0f426a00d5bb4c88f%22%0A LLVM_SHA256 = %223d2ca52bd36ad3904f2f5d0e43935b0e82f3d1ac137e0a89025141e36735944f %22%0A%0A
fee6e923b27947721ba1ea4ce5005f54eabba421
Integrate LLVM at llvm/llvm-project@4504e1134c91
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "0ad1d9fdf22dad41312e02b8bc990bf58ce1744c" LLVM_SHA256 = "517db6d771cf24d9f0aea6d4fdd59591347c7eb9d86ef58521fe8cb929fbe82b" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-" + LLVM_COMMIT, urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], link_files = { "//third_party/llvm:llvm.autogenerated.BUILD": "llvm/BUILD", "//third_party/mlir:BUILD": "mlir/BUILD", "//third_party/mlir:build_defs.bzl": "mlir/build_defs.bzl", "//third_party/mlir:linalggen.bzl": "mlir/linalggen.bzl", "//third_party/mlir:tblgen.bzl": "mlir/tblgen.bzl", "//third_party/mlir:test.BUILD": "mlir/test/BUILD", }, )
Python
0.000003
@@ -160,47 +160,47 @@ = %22 -0ad1d9fdf22dad413 +4504e1134c9118f3c322685f8a90 12 +9 e0 -2b8bc990bf58ce1744 +9bab92 c%22%0A @@ -221,72 +221,72 @@ = %22 -517db6d771cf24d9f0aea6d4fdd59591347c7eb9d86ef58521fe8cb929fbe82b +2b1d7a96ff37600cae12d2ed51b9f0554b1bbc6511ffe51ac7525928b29bab44 %22%0A%0A
a868b0d057b34dbd487a1e3d2b08d5489651b3ff
Integrate LLVM at llvm/llvm-project@fe611b1da84b
third_party/llvm/workspace.bzl
third_party/llvm/workspace.bzl
"""Provides the repository macro to import LLVM.""" load("//third_party:repo.bzl", "tf_http_archive") def repo(name): """Imports LLVM.""" LLVM_COMMIT = "bd7ece4e063e7afd08cbaa311878c09aadf5ec21" LLVM_SHA256 = "e9390dfa94c1143f35437bea8a011b030194e047bc3df45e2627cff88f83d2ed" tf_http_archive( name = name, sha256 = LLVM_SHA256, strip_prefix = "llvm-project-{commit}".format(commit = LLVM_COMMIT), urls = [ "https://storage.googleapis.com/mirror.tensorflow.org/github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), "https://github.com/llvm/llvm-project/archive/{commit}.tar.gz".format(commit = LLVM_COMMIT), ], build_file = "//third_party/llvm:BUILD.bazel", )
Python
0.000006
@@ -160,133 +160,133 @@ = %22 -bd7ece4e063e7afd08cbaa311878c09aadf5ec21%22%0A LLVM_SHA256 = %22e9390dfa94c1143f35437bea8a011b030194e047bc3df45e2627cff88f83d2ed +fe611b1da84b9442c093739394d336af9e99c1a1%22%0A LLVM_SHA256 = %2252edc892b020736f4c53e52b63687ee7caab93c90a0062b4026f4d49fc18266f %22%0A%0A
6b01692fcdaf0dc01bb32eefcf883200d2864f60
make paths work independent of cursor
statscraper/scrapers/PXWebScraper.py
statscraper/scrapers/PXWebScraper.py
# encoding: utf-8
""" A wrapper around the PX-Web API.

As implementations and versions vary, this is best used
as a base class, for more specific scrapers to extend.

If used directly, an API endpoint must be set:

    scraper = PXWeb(base_url="http://api.example.com/")
    # ...or:
    scraper = PXWeb()
    scraper.base_url = "http://api.example.com/"
"""
try:
    from simplejson.scanner import JSONDecodeError
except ImportError:
    from json.decoder import JSONDecodeError

import requests

from statscraper import (BaseScraper, Collection, Dataset,
                         Dimension, InvalidData)


class PXWeb(BaseScraper):
    base_url = None  # API endpoint

    @BaseScraper.on("init")
    def _get_args(self, *args, **kwargs):
        """ Store `base_url`, if given on init.

        This is convenient when the PXWeb scraper is used
        directly by an end user.
        """
        if "base_url" in kwargs and kwargs["base_url"]:
            self.base_url = kwargs["base_url"]

    @property
    def _api_path(self):
        """Get the API path for the current cursor position."""
        if self.base_url is None:
            raise NotImplementedError("base_url not set")
        path = "/".join([x.blob["id"] for x in self.current_item.path])
        return "/".join([self.base_url, path])

    def _fetch_itemslist(self, item):
        data = requests.get(self._api_path).json()
        for d in data:
            if d["type"] == "l":
                yield Collection(d["text"], blob=d)
            else:
                yield Dataset(d["text"], blob=d)

    def _fetch_dimensions(self, dataset):
        data = requests.get(self._api_path).json()
        try:
            for d in data["variables"]:
                yield Dimension(d["code"],
                                label=d["text"],
                                allowed_values=d["values"])
        except KeyError:
            yield None

    def _fetch_data(self, dataset, query):
        if query is None:
            query = {}
        body = {
            'query': [{
                'code': key,
                'selection': {
                    'filter': "item",
                    # value can be a list or a value
                    'values': value if isinstance(value, list) else [value]
                }
            } for key, value in query.iteritems()],
            'response': {
                'format': "json"
            }
        }
        try:
            raw = requests.post(self._api_path, json=body)
            data = raw.json()
        except JSONDecodeError:
            raise InvalidData("No valid response from PX Web. Check your query for spelling errors.")

        return data["data"]
Python
0.000003
@@ -1016,22 +1016,8 @@ %22%5D%0A%0A - @property%0A @@ -1034,16 +1034,22 @@ ath(self +, item ):%0A @@ -1250,21 +1250,8 @@ in -self.current_ item @@ -1378,32 +1378,38 @@ t(self._api_path +(item) ).json()%0A%0A @@ -1620,16 +1620,16 @@ taset):%0A - @@ -1662,16 +1662,25 @@ api_path +(dataset) ).json() @@ -2506,16 +2506,25 @@ api_path +(dataset) , json=b
a973c3b5d1683cd27f8b88c4c38daa934dae75c1
refactor storeRegiser
usermanage/views/storeRegister.py
usermanage/views/storeRegister.py
Python
0.99987
@@ -0,0 +1,1286 @@ +from django.shortcuts import render, redirect%0Afrom django.http import HttpResponseRedirect%0Afrom django.contrib.auth import login, authenticate, logout%0Afrom django.contrib.auth.models import User, Group%0Afrom django.contrib.auth.decorators import login_required, user_passes_test, permission_required%0Afrom django.contrib.auth.forms import UserCreationForm%0Afrom customermanage.models import Coupon, Wallet%0Afrom storemanage.models import Ticket%0A# Create your views here.%0Afrom usermanage import models%0A%0Adef storeRegister(request):%0A if request.user.is_authenticated:%0A return redirect('index:index')%0A if request.method == 'GET':%0A return render(request,'usermanage/register-store.html')%0A data = request.POST%0A%0A # check user already exits%0A if User.objects.filter(username=data%5B'username'%5D).exists():%0A return render(request,'usermanage/register-store.html', %7B%0A 'user_error' : True,%0A %7D)%0A%0A user = User.objects.create_user(data%5B'username'%5D, password = data%5B'password'%5D)%0A g = Group.objects.get(name='store')%0A g.user_set.add(user)%0A user.save()%0A g.save()%0A storeprofile = models.Store(user = user, store_name=data%5B'storename'%5D, profile_image_url=data%5B'profile_image_url'%5D)%0A storeprofile.save()%0A return redirect_after_login(user)%0A
9316bc07c77e2f51332a40bf430cef117f4d89e1
Add script to check for Dockerfile coverage
util/check_dockerfile_coverage.py
util/check_dockerfile_coverage.py
Python
0
@@ -0,0 +1,2410 @@ +import yaml%0Aimport os%0Aimport pathlib2%0Aimport itertools%0Aimport argparse%0Aimport logging%0Aimport sys%0A%0ATRAVIS_BUILD_DIR = os.environ.get(%22TRAVIS_BUILD_DIR%22)%0ACONFIG_FILE_PATH = pathlib2.Path(TRAVIS_BUILD_DIR, %22util%22, %22parsefiles_config.yml%22)%0ALOGGER = logging.getLogger(__name__)%0A%0Adef check_coverage(containers):%0A # open config file containing container weights%0A config_file_path = pathlib2.Path(CONFIG_FILE_PATH)%0A%0A with (config_file_path.open(mode='r')) as file:%0A try:%0A config = yaml.load(file)%0A except yaml.YAMLError, exc:%0A LOGGER.error(%22error in configuration file: %25s%22 %25 str(exc))%0A sys.exit(1)%0A%0A # get container weights%0A weights = config.get(%22weights%22)%0A%0A # convert all containers in config file to a list of tuples (%3Ccontainer%3E, %3Cweight%3E)%0A weights_list = %5Bx.items() for x in weights%5D%0A weights_list = list(itertools.chain.from_iterable(weights_list))%0A%0A # performs intersection between weighted containers and input containers%0A used_containers = %5Bx for x in weights_list if x%5B0%5D in containers%5D%0A%0A # determine which Dockerfiles are not covered; i.e. the set difference of the Dockerfiles to build minus the Dockerfile%0A # available to be built is non-empty%0A uncovered = set(containers) - set(%5Bx%5B0%5D for x in used_containers%5D)%0A%0A # exit with error code if uncovered Dockerfiles exist%0A if uncovered:%0A LOGGER.error(%22The following Dockerfiles are not described in the parsefiles_config.yml file: %7B%7D. Please see the following documentation on how to add Dockerfile ranks to the configuration file: %7B%7D%22.format(uncovered, %22https://github.com/edx/configuration/blob/master/util/README.md%22))%0A sys.exit(1)%0A%0Adef arg_parse():%0A%0A parser = argparse.ArgumentParser(description = 'Given a list of containers as input and a number of shards, '%0A 'finds an approximation of the optimal distribution of the containers over the shards, provided a set of hard-coded weights '%0A 'in parsefiles_config.yml.')%0A parser.add_argument('containers', help = %22the Dockerfiles that need to be built as the result of some commit change and whose coverage is checked%22)%0A%0A return parser.parse_args()%0A%0Aif __name__ == '__main__':%0A%0A args = arg_parse()%0A%0A # configure logging%0A logging.basicConfig()%0A%0A containers = %5B%5D%0A%0A for word in args.containers.split():%0A containers.append(word)%0A%0A check_coverage(containers)%0A
c78c82987feb4dbe91b750ab90a3d163fc1340c6
Add datatables filter mixin
rhinocloud/contrib/jquery/views.py
rhinocloud/contrib/jquery/views.py
Python
0
@@ -0,0 +1,2169 @@ +from django.views import generic%0Afrom django.db.models import Q%0Afrom django.core.exceptions import ImproperlyConfigured%0A%0A%0Aclass DataTablesServerDataMixin(object):%0A query_class = Q%0A initial_query_kwargs = %7B%7D%0A searchable_fields = ()%0A %0A def get_searchable_fields(self):%0A if self.searchable_fields is not None:%0A fields = self.searchable_fields%0A else:%0A raise ImproperlyConfigured('Provide searchable_fields or override get_searchable_fields().')%0A return fields%0A%0A def get_query_class(self):%0A if self.query_class:%0A qc = self.query_class%0A else:%0A raise ImproperlyConfigured('Provide query_class or override get_query_class().')%0A return qc%0A %0A def get_initial_query_kwargs(self):%0A if self.initial_query_kwargs is not None:%0A kwargs = self.initial_query_kwargs%0A else:%0A raise ImproperlyConfigured('Provide initial_query_kwargs or override get_initial_query_kwargs().')%0A return kwargs%0A %0A def get_initial_query(self):%0A return self.get_query_class()(**self.get_initial_query_kwargs())%0A %0A def get_searchterm_query(self, field, value):%0A return self.get_query_class()(**%7B'%25s__contains' %25 field: value%7D)%0A %0A def get_queryset(self, **kwargs):%0A queryset = super(DataTablesServerDataMixin, self).get_queryset(**kwargs)%0A iSortingCols = int(self.request.GET.get('iSortingCols', -1))%0A sSearch = self.request.GET.get('sSearch', None)%0A %0A if sSearch is not None:%0A query = self.get_initial_query()%0A for field in self.get_searchable_fields():%0A query.add(self.get_searchterm_query(field, sSearch), Q.OR)%0A queryset = queryset.filter(query)%0A %0A ordering = %5B%5D%0A for i in range(iSortingCols):%0A sSortDir = self.request.GET%5B'sSortDir_%25s' %25 i%5D%0A iSortingCols = int(self.request.GET%5B'iSortCol_%25s' %25 i%5D)%0A ordering.append('%25s%25s' %25 (sSortDir == 'asc' and '-' or '', self.get_searchable_fields()%5BiSortingCols%5D))%0A queryset = queryset.order_by(*ordering)%0A return queryset%0A
7a26442d7b219f616062cc60db9cb762a9ac1d37
Add found view controller to pasteboard
commands/FBFindCommands.py
commands/FBFindCommands.py
#!/usr/bin/python

# Copyright (c) 2014, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.

import os
import re

import lldb
import fblldbbase as fb
import fblldbviewcontrollerhelpers as vcHelpers
import fblldbobjcruntimehelpers as objc

def lldbcommands():
  return [
    FBFindViewControllerCommand(),
    FBFindOwningViewControllerCommand(),
    FBFindViewCommand(),
    FBFindViewByAccessibilityLabelCommand(),
    FBTapLoggerCommand(),
  ]

class FBFindViewControllerCommand(fb.FBCommand):
  def name(self):
    return 'fvc'

  def description(self):
    return 'Find the view controllers whose class names match classNameRegex and puts the address of first on the clipboard.'

  def args(self):
    return [ fb.FBCommandArgument(arg='classNameRegex', type='string', help='The view-controller-class regex to search the view controller hierarchy for.') ]

  def run(self, arguments, options):
    output = vcHelpers.viewControllerRecursiveDescription('(id)[[UIWindow keyWindow] rootViewController]')
    printMatchesInViewOutputStringAndCopyFirstToClipboard(arguments[0], output)

class FBFindOwningViewControllerCommand(fb.FBCommand):
  def name(self):
    return 'fovc'

  def descriptino(self):
    return 'Find the view controller that owns the input view.'

  def args(self):
    return [ fb.FBCommandArgument(arg='view', type='UIView', help='This function will print the View Controller that owns this view')]

  def run(self, arguments, options):
    object = arguments[0]
    while object:
      if self.isViewController(object):
        description = fb.evaluateExpressionValue(object).GetObjectDescription()
        print("Found the owning view controller.\n{}".format(description))
        return
      else:
        object = self.nextResponder(object)
    print("Could not find an owning view controller")

  @staticmethod
  def isViewController(object):
    command = '[(id){} isKindOfClass:[UIViewController class]]'.format(object)
    isVC = fb.evaluateBooleanExpression(command)
    return isVC

  @staticmethod
  def nextResponder(object):
    command = '[((id){}) nextResponder]'.format(object)
    nextResponder = fb.evaluateObjectExpression(command)
    if int(nextResponder, 0):
      return nextResponder
    else:
      return None

class FBFindViewCommand(fb.FBCommand):
  def name(self):
    return 'fv'

  def description(self):
    return 'Find the views whose class names match classNameRegex and puts the address of first on the clipboard.'

  def args(self):
    return [ fb.FBCommandArgument(arg='classNameRegex', type='string', help='The view-class regex to search the view hierarchy for.') ]

  def run(self, arguments, options):
    output = fb.evaluateExpressionValue('(id)[[UIWindow keyWindow] recursiveDescription]').GetObjectDescription()
    printMatchesInViewOutputStringAndCopyFirstToClipboard(arguments[0], output)

def printMatchesInViewOutputStringAndCopyFirstToClipboard(needle, haystack):
  matches = re.findall('.*<.*' + needle + '.*: (0x[0-9a-fA-F]*);.*', haystack, re.IGNORECASE)
  for match in matches:
    className = fb.evaluateExpressionValue('(id)[(' + match + ') class]').GetObjectDescription()
    print('{} {}'.format(match, className))

  if len(matches) > 0:
    cmd = 'echo %s | tr -d "\n" | pbcopy' % matches[0]
    os.system(cmd)

class FBFindViewByAccessibilityLabelCommand(fb.FBCommand):
  def name(self):
    return 'fa11y'

  def description(self):
    return 'Find the views whose accessibility labels match labelRegex and puts the address of the first result on the clipboard.'

  def args(self):
    return [ fb.FBCommandArgument(arg='labelRegex', type='string', help='The accessibility label regex to search the view hierarchy for.') ]

  def run(self, arguments, options):
    first = None
    haystack = fb.evaluateExpressionValue('(id)[[UIWindow keyWindow] recursiveDescription]').GetObjectDescription()
    needle = arguments[0]

    allViews = re.findall('.* (0x[0-9a-fA-F]*);.*', haystack)
    for view in allViews:
      a11yLabel = fb.evaluateExpressionValue('(id)[(' + view + ') accessibilityLabel]').GetObjectDescription()
      if re.match(r'.*' + needle + '.*', a11yLabel, re.IGNORECASE):
        print('{} {}'.format(view, a11yLabel))

        if first == None:
          first = view

    cmd = 'echo %s | tr -d "\n" | pbcopy' % first
    os.system(cmd)

class FBTapLoggerCommand(fb.FBCommand):
  def name(self):
    return 'taplog'

  def description(self):
    return 'Log tapped view to the console.'

  def run(self, arguments, options):
    parameterExpr = objc.functionPreambleExpressionForObjectParameterAtIndex(0)
    breakpoint = lldb.debugger.GetSelectedTarget().BreakpointCreateByName("-[UIApplication sendEvent:]")
    breakpoint.SetCondition('(int)[' + parameterExpr + ' type] == 0 && (int)[[[' + parameterExpr + ' allTouches] anyObject] phase] == 0')
    breakpoint.SetOneShot(True)
    lldb.debugger.HandleCommand('breakpoint command add -s python -F "sys.modules[\'' + __name__ + '\'].' + self.__class__.__name__ + '.taplog_callback" ' + str(breakpoint.id))
    lldb.debugger.SetAsync(True)
    lldb.debugger.HandleCommand('continue')

  @staticmethod
  def taplog_callback(frame, bp_loc, internal_dict):
    parameterExpr = objc.functionPreambleExpressionForObjectParameterAtIndex(0)
    lldb.debugger.HandleCommand('po [[[%s allTouches] anyObject] view]' % (parameterExpr))
    # We don't want to proceed event (click on button for example), so we just skip it
    lldb.debugger.HandleCommand('thread return')
Python
0
@@ -1905,16 +1905,100 @@ ption))%0A + cmd = 'echo %7B%7D %7C tr -d %22%5Cn%22 %7C pbcopy'.format(object)%0A os.system(cmd)%0A
d50378a63d3e2d2385371194dc645970d1836a7c
Fix suggestion slack notification format indexes
controllers/suggestions/suggest_designs_review_controller.py
controllers/suggestions/suggest_designs_review_controller.py
import datetime
import os
import json
import logging

from google.appengine.ext import ndb

from consts.account_permissions import AccountPermissions
from controllers.suggestions.suggestions_review_base_controller import SuggestionsReviewBaseController
from helpers.media_manipulator import MediaManipulator
from helpers.suggestions.media_creator import MediaCreator
from helpers.suggestions.suggestion_notifier import SuggestionNotifier
from models.media import Media
from models.sitevar import Sitevar
from models.suggestion import Suggestion
from template_engine import jinja2_engine


class SuggestDesignsReviewController(SuggestionsReviewBaseController):

    def __init__(self, *args, **kw):
        self.REQUIRED_PERMISSIONS.append(AccountPermissions.REVIEW_DESIGNS)
        super(SuggestDesignsReviewController, self).__init__(*args, **kw)

    """
    View the list of suggestions.
    """
    def get(self):
        if self.request.get('action') and self.request.get('id'):
            # Fast-path review
            self.verify_permissions()
            suggestion = Suggestion.get_by_id(self.request.get('id'))
            status = None
            if suggestion and suggestion.target_model == 'robot':
                if suggestion.review_state == Suggestion.REVIEW_PENDING:
                    slack_message = None
                    if self.request.get('action') == 'accept':
                        self._process_accepted(suggestion.key.id())
                        status = 'accepted'
                        slack_message = "{0} ({1}) accepted the <https://grabcad.com/library/{}|suggestion> for team <https://thebluealliance.com/team/{2}/{3}|{2} in {3}>".format(
                            self.user_bundle.account.display_name,
                            self.user_bundle.account.email,
                            suggestion.contents['foreign_key'],
                            suggestion.contents['reference_key'][3:],
                            suggestion.contents['year']
                        )
                    elif self.request.get('action') == 'reject':
                        suggestion.review_state = Suggestion.REVIEW_REJECTED
                        suggestion.reviewer = self.user_bundle.account.key
                        suggestion.reviewed_at = datetime.datetime.now()
                        suggestion.put()
                        status = 'rejected'
                        slack_message = "{0} ({1}) rejected the <https://grabcad.com/library/{}|suggestion> for team <https://thebluealliance.com/team/{2}/{3}|{2} in {3}>".format(
                            self.user_bundle.account.display_name,
                            self.user_bundle.account.email,
                            suggestion.contents['foreign_key'],
                            suggestion.contents['reference_key'][3:],
                            suggestion.contents['year']
                        )

                    if slack_message:
                        slack_sitevar = Sitevar.get_or_insert('slack.hookurls')
                        if slack_sitevar:
                            slack_url = slack_sitevar.contents.get('tbablog', '')
                            SuggestionNotifier.send_slack_alert(slack_url, slack_message)
                else:
                    status = 'already_reviewed'
            else:
                status = 'bad_suggestion'

            if status:
                self.redirect('/suggest/review?status={}'.format(status), abort=True)

        suggestions = Suggestion.query().filter(
            Suggestion.review_state == Suggestion.REVIEW_PENDING).filter(
            Suggestion.target_model == "robot").fetch(limit=50)

        reference_keys = []
        for suggestion in suggestions:
            reference_key = suggestion.contents['reference_key']
            reference = Media.create_reference(
                suggestion.contents['reference_type'],
                reference_key)
            reference_keys.append(reference)

        reference_futures = ndb.get_multi_async(reference_keys)
        references = map(lambda r: r.get_result(), reference_futures)

        suggestions_and_references = zip(suggestions, references)

        self.template_values.update({
            "suggestions_and_references": suggestions_and_references,
        })

        self.response.out.write(jinja2_engine.render('suggest_designs_review.html', self.template_values))

    @ndb.transactional(xg=True)
    def _process_accepted(self, accept_key):
        """
        Performs all actions for an accepted Suggestion in a Transaction.
        Suggestions are processed one at a time (instead of in batch)
        in a Transaction to prevent possible race conditions.
        """
        # Async get
        suggestion_future = Suggestion.get_by_id_async(accept_key)

        # Resolve async Futures
        suggestion = suggestion_future.get_result()

        # Make sure Suggestion hasn't been processed (by another thread)
        if suggestion.review_state != Suggestion.REVIEW_PENDING:
            return

        team_reference = Media.create_reference(
            suggestion.contents['reference_type'],
            suggestion.contents['reference_key'])

        media = MediaCreator.create_media(suggestion, team_reference)

        # Mark Suggestion as accepted
        suggestion.review_state = Suggestion.REVIEW_ACCEPTED
        suggestion.reviewer = self.user_bundle.account.key
        suggestion.reviewed_at = datetime.datetime.now()

        # Do all DB writes
        MediaManipulator.createOrUpdate(media)
        suggestion.put()

    def post(self):
        self.verify_permissions()

        accept_keys = []
        reject_keys = []
        for value in self.request.POST.values():
            logging.debug(value)
            split_value = value.split('::')
            if len(split_value) == 2:
                key = split_value[1]
            else:
                continue
            if value.startswith('accept'):
                accept_keys.append(key)
            elif value.startswith('reject'):
                reject_keys.append(key)

        # Process accepts
        for accept_key in accept_keys:
            self._process_accepted(accept_key)

        # Process rejects
        rejected_suggestion_futures = [Suggestion.get_by_id_async(key) for key in reject_keys]
        rejected_suggestions = map(lambda a: a.get_result(), rejected_suggestion_futures)
        for suggestion in rejected_suggestions:
            if suggestion.review_state == Suggestion.REVIEW_PENDING:
                suggestion.review_state = Suggestion.REVIEW_REJECTED
                suggestion.reviewer = self.user_bundle.account.key
                suggestion.reviewed_at = datetime.datetime.now()
        ndb.put_multi(rejected_suggestions)

        self.redirect("/suggest/cad/review")
Python
0
@@ -1584,32 +1584,33 @@ ad.com/library/%7B +2 %7D%7Csuggestion%3E fo @@ -1651,32 +1651,32 @@ m/team/%7B -2%7D/%7B3%7D%7C%7B2 +3%7D/%7B4%7D%7C%7B3 %7D in %7B -3 +4 %7D%3E%22.form @@ -2491,16 +2491,17 @@ ibrary/%7B +2 %7D%7Csugges @@ -2554,24 +2554,24 @@ am/%7B -2%7D/%7B3%7D%7C%7B2 +3%7D/%7B4%7D%7C%7B3 %7D in %7B -3 +4 %7D%3E%22.
805708048f493ca538a9e0b8d9d40ae1d4baf2c3
Add a tool to reproduce HTTP KeepAlive races in OpenStack gate jobs.
keepalive-race/keep-alive-race.py
keepalive-race/keep-alive-race.py
Python
0.000029
@@ -0,0 +1,1380 @@ +#!/usr/bin/python3%0A%22%22%22%0AThis script demonstrates a race condition with HTTP/1.1 keepalive%0A%22%22%22%0Aimport decimal%0Aimport json%0Aimport subprocess%0Aimport time%0Aimport threading%0A%0Aimport requests%0Arequests.packages.urllib3.disable_warnings()%0A%0ACREDS = json.loads(subprocess.check_output(%0A %22openstack --os-cloud devstack token issue -f json%22.split(),%0A).decode())%0AURL = 'https://10.0.1.44:8774/v2/%25s/servers/detail' %25 (CREDS%5B'project_id'%5D)%0A%0A%0Adef decimal_range(x, y, jump):%0A x = decimal.Decimal(x)%0A y = decimal.Decimal(y)%0A jump = decimal.Decimal(jump)%0A while x %3C y:%0A yield float(x)%0A x += jump%0A%0A%0Adef get(exit):%0A for delay in decimal_range(4.95, 4.96, 0.005):%0A session = requests.Session()%0A%0A if exit.is_set():%0A return%0A%0A for i in range(10):%0A%0A if exit.is_set():%0A return%0A%0A time.sleep(delay)%0A headers = %7B%0A 'User-Agent': 'timeout-race/%25s' %25 i,%0A 'X-Auth-Token': CREDS%5B'id'%5D%0A %7D%0A try:%0A session.get(URL, verify=False, headers=headers)%0A except Exception as e:%0A print(e)%0A exit.set()%0A%0A%0Athreads = %5B%5D%0Aexit = threading.Event()%0Afor i in range(50):%0A threads.append(threading.Thread(target=get,args=(exit,)))%0A%0Afor thread in threads:%0A thread.start()%0A%0Afor thread in threads:%0A thread.join()%0A
e11de6b814da4e5ade9fadaa035d6141ab3c113f
add test that features can be read and written ok
test/test_features.py
test/test_features.py
Python
0
@@ -0,0 +1,1121 @@ +#!/usr/bin/python%0Aimport unittest%0Aimport RMF%0A%0Aclass GenericTest(unittest.TestCase):%0A def _create(self, path):%0A fh= RMF.create_rmf_file(path)%0A rt= fh.get_root_node()%0A reps=%5Brt.add_child(%22rep%22+str(i), RMF.REPRESENTATION) for i in range(0,5)%5D%0A sf= RMF.ScoreFactory(fh)%0A fn= rt.add_child(%22feature%22, RMF.FEATURE)%0A sd= sf.get(fn)%0A sd.set_score(10.0)%0A sd.set_representation(reps)%0A def _test(self, path):%0A fh= RMF.open_rmf_file_read_only(path)%0A rt= fh.get_root_node()%0A ch= rt.get_children()%0A fn= ch%5B-1%5D%0A reps= ch%5B:-1%5D%0A sf= RMF.ScoreConstFactory(fh)%0A sd= sf.get(fn)%0A print sd.get_score()%0A print reps%0A print sd.get_representation()%0A self.assert_(sd.get_score()==10)%0A self.assert_(sd.get_representation() == reps)%0A def test_multiparent(self):%0A %22%22%22Test that feature nodes work right%22%22%22%0A for suffix in RMF.suffixes:%0A path=RMF._get_temporary_file_path(%22alias2.%22+suffix)%0A print path%0A self._create(path)%0A self._test(path)%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
d621948137f9b5df20a53b6e2b12dd61551f22bd
fix : bug in test_trigger, but why does it worked on my machine and not jenkins? strange.
test/test_triggers.py
test/test_triggers.py
#!/usr/bin/env python2.6
#Copyright (C) 2009-2010 :
#    Gabes Jean, [email protected]
#    Gerhard Lausser, [email protected]
#
#This file is part of Shinken.
#
#Shinken is free software: you can redistribute it and/or modify
#it under the terms of the GNU Affero General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#Shinken is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU Affero General Public License for more details.
#
#You should have received a copy of the GNU Affero General Public License
#along with Shinken. If not, see <http://www.gnu.org/licenses/>.

#
# This file is used to test reading and processing of config files
#

#It's ugly I know....
from shinken_test import *
from shinken.trigger import Trigger


class TestTriggers(ShinkenTest):
    # Uncomment this is you want to use a specific configuration
    # for your test
    def setUp(self):
        self.setup_with_file('etc/nagios_triggers.cfg')

    # Change ME :)
    def test_simple_triggers(self):
        #
        # Config is not correct because of a wrong relative path
        # in the main config file
        #
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "test_ok_0")
        code = '''r = self.get_name()'''.replace(r'\n', '\n').replace(r'\t', '\t')
        t = Trigger({'trigger_name' : 'none', 'code_src': code})
        t.compile()
        r = t.eval(svc)
        print r

        code = '''self.output = "Moncul c'est du poulet" '''.replace(r'\n', '\n').replace(r'\t', '\t')
        t = Trigger({'trigger_name' : 'none', 'code_src': code})
        t.compile()
        r = t.eval(svc)
        print "Service output", svc.output
        self.assert_(svc.output == "Moncul c'est du poulet")

        code = '''self.output = "Moncul c'est du poulet2"
self.perf_data = "Moncul c'est du poulet3"
'''.replace(r'\n', '\n').replace(r'\t', '\t')
        t = Trigger({'trigger_name' : 'none', 'code_src': code})
        t.compile()
        r = t.eval(svc)
        print "Service output", svc.output
        print "Service perf_data", svc.perf_data
        self.assert_(svc.output == "Moncul c'est du poulet2")
        self.assert_(svc.perf_data == "Moncul c'est du poulet3")

    # Change ME :)
    def test_in_conf_trigger(self):
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "i_got_trigger")
        print 'will run', svc.trigger
        # Go!
        svc.eval_triggers()
        print "Output", svc.output
        print "Perf_Data", svc.perf_data
        self.assert_(svc.output == "New output")
        self.assert_(svc.perf_data == "New perf_data")

    # Try to catch the perf_datas of self
    def test_simple_cpu_too_high(self):
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "cpu_too_high")
        svc.output = 'I am OK'
        svc.perf_data = 'cpu=95%'
        # Go launch it!
        svc.eval_triggers()
        print "Output", svc.output
        print "Perf_Data", svc.perf_data
        self.assert_(svc.output == "not good!")
        self.assert_(svc.perf_data == "cpu=95%")

        # Same with an host
        host = self.sched.hosts.find_by_name("test_host_trigger")
        host.output = 'I am OK'
        host.perf_data = 'cpu=95%'
        # Go launch it!
        host.eval_triggers()
        self.scheduler_loop(2, [])
        print "Output", host.output
        print "Perf_Data", host.perf_data
        self.assert_(host.output == "not good!")
        self.assert_(host.perf_data == "cpu=95")

    # Try to catch the perf_datas of self
    def test_morecomplex_cpu_too_high(self):
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "cpu_too_high_bis")
        svc.output = 'I am OK'
        svc.perf_data = 'cpu=95%'
        # Go launch it!
        svc.eval_triggers()
        self.scheduler_loop(2, [])
        print "Output", svc.output
        print "Perf_Data", svc.perf_data
        self.assert_(svc.output == "not good!")
        self.assert_(svc.perf_data == "cpu=95")

    # Try to load .trig files
    def test_trig_file_loading(self):
        svc = self.sched.services.find_srv_by_name_and_hostname("test_host_0", "cpu_too_high_ter")
        t = self.conf.triggers.find_by_name('simple_cpu')
        self.assert_(t in svc.triggers)
        svc.output = 'I am OK'
        svc.perf_data = 'cpu=95%'
        svc.eval_triggers()
        self.scheduler_loop(2, [])
        print "Output", svc.output
        print "Perf_Data", svc.perf_data
        self.assert_(svc.output == "not good!")
        self.assert_(svc.perf_data == "cpu=95")

        # same for host
        host = self.sched.hosts.find_by_name('test_host_trigger2')
        t = self.conf.triggers.find_by_name('simple_cpu')
        self.assert_(t in host.triggers)
        host.output = 'I am OK'
        host.perf_data = 'cpu=95%'
        host.eval_triggers()
        self.scheduler_loop(2, [])
        print "Output", host.output
        print "Perf_Data", host.perf_data
        self.assert_(host.output == "not good!")
        self.assert_(host.perf_data == "cpu=95")


if __name__ == '__main__':
    unittest.main()
Python
0
@@ -930,16 +930,24 @@ shinken. +objects. trigger
48da7ceb86387d3cb6fd53f50110232813123ecc
Add tests for ansible roster virtual
tests/pytests/unit/roster/test_ansible.py
tests/pytests/unit/roster/test_ansible.py
Python
0
@@ -0,0 +1,423 @@ +import pytest%0Aimport salt.roster.ansible as ansible%0Afrom tests.support.mock import patch%0A%0A%[email protected]%[email protected](%0A %22which_value%22,%0A %5BFalse, None%5D,%0A)%0Adef test_virtual_returns_False_if_ansible_inventory_doesnt_exist(which_value):%0A with patch(%22salt.utils.path.which%22, autospec=True, return_value=which_value):%0A assert ansible.__virtual__() == (False, %22Install %60ansible%60 to use inventory%22)%0A
f3e3ab4fea1d367578adffbefd072616beaee65e
Create word_a10n.py
word_a10n.py
word_a10n.py
Python
0.999953
@@ -0,0 +1,296 @@ +#Kunal Gautam%0A#Codewars : @Kunalpod%0A#Problem name: Word a10n (abbreviation)%0A#Problem level: 6 kyu%0A%0Aimport re%0Adef abbreviate(s):%0A words = re.findall('%5BA-Za-z%5D%5BA-Za-z%5D%5BA-Za-z%5D%5BA-Za-z%5D+', s)%0A for word in words:%0A s = s.replace(word, word%5B0%5D + str(len(word) - 2) + word%5B-1%5D)%0A return s%0A
8dd3207298e7d81f5d4abdfa62604d5849d132fd
Add Python hello client
example/hello.py
example/hello.py
Python
0.000206
@@ -0,0 +1,656 @@ +#!/usr/bin/env python%0A%0Aimport dbus%0A%0Abus = dbus.SessionBus()%0Aremote_object = bus.get_object(%22org.za.hem.DBus%22, %22/Root%22)%0A%0Adbus_interface = %22org.designfu.SampleInterface%22%0Aiface = dbus.Interface(remote_object, dbus_interface)%0A%0Ahello_reply_list = remote_object.HelloWorld(17, %22Hello from example-client.py!%22, dbus_interface=dbus_interface)%0Aprint (hello_reply_list)%0A%0Aremote_object.HelloWorld(1, %7B%22php%22:%22Rasmus Lerdorf%22,%5C%0A %22perl%22:%22Larry Wall%22,%5C%0A %22python%22:%22Guido van Rossum%22%7D)%0A%0Ahello_reply_tuple = iface.GetTuple()%0Aprint str(hello_reply_tuple)%0A%0Ahello_reply_dict = iface.GetDict()%0Aprint str(hello_reply_dict)%0A
5836eb513b244a21e33e111cd3c3d6f33530aeae
Add a simple widget that sets up basic layout for common controls.
source/harmony/ui/widget/simple.py
source/harmony/ui/widget/simple.py
Python
0
@@ -0,0 +1,1337 @@ +# :coding: utf-8%0A# :copyright: Copyright (c) 2013 Martin Pengelly-Phillips%0A# :license: See LICENSE.txt.%0A%0Afrom PySide import QtGui%0A%0Afrom .base import Widget%0A%0A%0Aclass Simple(Widget):%0A '''Simple widget that wraps a single control.'''%0A%0A def _construct(self):%0A '''Construct widget.'''%0A super(Simple, self)._construct()%0A self.setLayout(QtGui.QHBoxLayout())%0A%0A self.layout().addWidget(self._requiredIndicator)%0A self.layout().addWidget(self._titleLabel)%0A%0A self._prefix = QtGui.QFrame()%0A self._prefix.setLayout(QtGui.QHBoxLayout())%0A self._prefix.layout().addWidget(self._requiredIndicator)%0A self._prefix.layout().addWidget(self._titleLabel)%0A self.layout().addWidget(self._prefix, stretch=0)%0A%0A self._control = self._constructControl()%0A self.layout().addWidget(self._control, stretch=1)%0A self.layout().addWidget(self._errorIndicator, stretch=0)%0A%0A def _constructControl(self):%0A '''Return the control widget.%0A%0A Subclasses should override this to return an appropriate control%0A widget.%0A%0A '''%0A raise NotImplementedError()%0A%0A def value(self):%0A '''Return current value.'''%0A raise NotImplementedError()%0A%0A def setValue(self, value):%0A '''Set current *value*.'''%0A raise NotImplementedError()%0A
8628ab8cbcb185826e97af9148ec7d07861e29e7
Add setup_pfiles.py to setup parameter files for CIAO tools
scripts/setup_pfiles.py
scripts/setup_pfiles.py
Python
0
@@ -0,0 +1,925 @@ +# Copyright (c) 2017 Weitian LI %[email protected]%3E%0A# MIT license%0A#%0A# Weitian LI%0A# 2017-02-06%0A%0A%22%22%22%0APrepare the CIAO parameter files and setup the PFILES environment%0Avariable to keep the pfiles locally, in order to avoid the conflicts%0Abetween multiple instance of the same CIAO tools.%0A%22%22%22%0A%0Aimport os%0Aimport subprocess%0Aimport shutil%0A%0A%0Adef setup_pfiles(tools):%0A %22%22%22%0A Copy the parameter files of the specified tools to the current%0A working directory, and setup the %60%60PFILES%60%60 environment variable.%0A%0A Parameters%0A ----------%0A tools : list%5Bstr%5D%0A Name list of the tools to be set up%0A %22%22%22%0A for tool in tools:%0A pfile = subprocess.check_output(%5B%0A %22paccess%22, tool%0A %5D).decode(%22utf-8%22).strip()%0A subprocess.check_call(%5B%22punlearn%22, tool%5D)%0A shutil.copy(pfile, %22.%22)%0A # Setup the %60%60PFILES%60%60 environment variable%0A os.environ%5B%22PFILES%22%5D = %22./:%22 + os.environ%5B%22PFILES%22%5D%0A
6c8966e0e299c12e95d41009a8dde7519946b432
add db level constraint for one active subscription per domain
corehq/apps/accounting/migrations/0006_unique_active_domain_subscription.py
corehq/apps/accounting/migrations/0006_unique_active_domain_subscription.py
Python
0
@@ -0,0 +1,727 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.7 on 2017-04-22 17:18%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0Afrom corehq.sql_db.operations import HqRunSQL%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('accounting', '0005_automatic_downgrade_adjustment_method'),%0A %5D%0A%0A operations = %5B%0A HqRunSQL(%0A %22%22%22%0A CREATE UNIQUE INDEX accounting_subscription_active_subscriber%0A ON accounting_subscription(subscriber_id) WHERE (is_active = TRUE and is_hidden_to_ops = FALSE);%0A %22%22%22,%0A reverse_sql=%0A %22%22%22%0A DROP INDEX accounting_subscription_active_subscriber;%0A %22%22%22,%0A )%0A %5D%0A
faaa206923b99f4d986a32ddfd854b234377e988
Add ena2fasta script
bin/ena2fasta.py
bin/ena2fasta.py
Python
0
@@ -0,0 +1,1006 @@ +#!/usr/bin/env python%0A%0A# -*- coding: utf-8 -*-%0A%0A%22%22%22%0ACopyright %5B2009-2020%5D EMBL-European Bioinformatics Institute%0ALicensed under the Apache License, Version 2.0 (the %22License%22);%0Ayou may not use this file except in compliance with the License.%0AYou may obtain a copy of the License at%0Ahttp://www.apache.org/licenses/LICENSE-2.0%0AUnless required by applicable law or agreed to in writing, software%0Adistributed under the License is distributed on an %22AS IS%22 BASIS,%0AWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0ASee the License for the specific language governing permissions and%0Alimitations under the License.%0A%22%22%22%0A%0A%0Aimport click%0A%0Afrom Bio import SeqIO%0A%0A%[email protected]()%[email protected]('filename', type=click.File('r'))%[email protected]('output', type=click.File('w'))%0Adef main(filename, output):%0A %22%22%22%0A Convert a ENA EMBL file into a fasta file suitable for ribotyper analysis.%0A %22%22%22%0A SeqIO.convert(filename, %22embl%22, output, %22fasta%22)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
c913d1fc3ac24b2491783dc9c33f401180c7b4b0
test delivering with failures, refactor tests
tests/test_postman.py
tests/test_postman.py
from pytest import fixture
from mock import Mock, call

from mailthon.postman import Postman
from mailthon.envelope import Envelope
from mailthon.enclosure import PlainText


@fixture
def smtp():
    smtp = Mock()
    smtp.return_value = smtp
    smtp.noop.return_value = (250, 'ok')
    smtp.sendmail.return_value = {}
    return smtp


@fixture
def envelope():
    env = Envelope(
        headers={
            'From': 'Me <[email protected]>',
            'To': '[email protected]',
            'Subject': 'subject',
        },
        enclosure=[
            PlainText('Hi!'),
        ],
    )
    env.string = Mock(return_value='--email--')
    return env


class TestPostman:
    host = 'smtp.mail.com'
    port = 587

    @fixture
    def postman(self, smtp):
        postman = Postman(self.host, self.port)
        postman.transport = smtp
        return postman

    def test_connection(self, postman):
        with postman.connection() as conn:
            assert conn.mock_calls == [
                call(self.host, self.port),
                call.ehlo(),
            ]

    def test_options(self, postman):
        postman.options = dict(timeout=0)
        with postman.connection() as conn:
            expected = call(self.host, self.port, timeout=0)
            assert conn.mock_calls[0] == expected

    def test_deliver(self, postman, envelope):
        with postman.connection() as conn:
            r = postman.deliver(conn, envelope)

        calls = [
            call.sendmail(envelope.sender.encode(),
                          [k.encode() for k in envelope.receivers],
                          envelope.string()),
            call.noop(),
        ]
        conn.assert_has_calls(calls, any_order=True)
        assert r.ok

    def test_send(self, postman, smtp, envelope):
        postman.deliver = Mock(return_value=1)
        assert postman.send(envelope) == 1
        assert postman.deliver.mock_calls == [
            call(smtp, envelope)
        ]

    def test_use(self, postman):
        postman.use(lambda conn: conn.login('username', 'password'))
        with postman.connection() as smtp:
            assert smtp.login.mock_calls == [call('username', 'password')]
Python
0.000001
@@ -165,16 +165,65 @@ ainText%0A +from mailthon.headers import sender, to, subject%0A %0A%0A@fixtu @@ -444,30 +444,16 @@ ers= -%7B%0A 'From': +%5Bsender( 'Me @@ -466,16 +466,17 @@ il.com%3E' +) ,%0A @@ -485,14 +485,16 @@ -'To': + to( 'him @@ -503,16 +503,17 @@ ail.com' +) ,%0A @@ -522,19 +522,21 @@ + -'S + s ubject -': +( 'sub @@ -544,19 +544,10 @@ ect' -,%0A %7D +)%5D ,%0A @@ -563,29 +563,16 @@ losure=%5B -%0A PlainTex @@ -579,26 +579,16 @@ t('Hi!') -,%0A %5D,%0A ) @@ -1463,17 +1463,33 @@ -calls = %5B +sendmail = call.sendmail( %0A @@ -1501,30 +1501,16 @@ -call.sendmail( envelope @@ -1527,30 +1527,16 @@ code(),%0A - @@ -1601,30 +1601,16 @@ - - envelope @@ -1618,17 +1618,16 @@ string() -) ,%0A @@ -1623,32 +1623,39 @@ g(),%0A + )%0A call.noop() @@ -1638,24 +1638,34 @@ )%0A + noop = call.noop() @@ -1664,31 +1664,16 @@ l.noop() -,%0A %5D %0A%0A @@ -1700,21 +1700,32 @@ s_calls( -calls +%5Bsendmail, noop%5D , any_or @@ -1776,18 +1776,41 @@ est_ -send +deliver_with_failures (self, + smtp, pos @@ -1815,22 +1815,16 @@ ostman, -smtp, envelope @@ -1826,23 +1826,168 @@ elope):%0A + smtp.sendmail.return_value = %7B%0A 'addr': (255, 'something-bad'),%0A %7D%0A%0A with postman.connection() as conn:%0A + r = postman @@ -1998,46 +1998,190 @@ iver - = Mock(return_value=1)%0A assert +(conn, envelope)%0A%0A assert not r.rejected%5B'addr'%5D.ok%0A assert not r.ok%0A%0A def test_send(self, postman, smtp, envelope):%0A postman.deliver = Mock()%0A pos @@ -2203,13 +2203,8 @@ ope) - == 1 %0A
c6af972ca6dfd6396b3f16e2e218263faffe16ab
Add run_mraic.py
bin/run_mraic.py
bin/run_mraic.py
Python
0.000005
@@ -0,0 +1,1687 @@ +#!/usr/bin/env python%0A%0A%22%22%22%0A%0AName: run_mraic.py%0A%0AAuthor: Michael G. Harvey%0ADate: 5 July 2013%0A%0ADescription: Run mraic.pl (Nylanderb 2004) on a folder of alignments in phylip/phyml format.%0A%0AUsage: python run_mraic.py mraic_dir in_dir out_dir%0A%0Apython run_mraic.py /Users/michaelharvey/Applications/mraic /Users/michaelharvey/Desktop/pic/beast/deep_UCEs/77_loci_phylip ~/Desktop/mraic_out%0Apython run_mraic.py /Users/michaelharvey/Applications/mraic /Users/michaelharvey/Desktop/pic/beast/shallow_UCEs/Xm/orthologs/phylip ~/Desktop/mraic_UCE_shallow_out%0A%0A%22%22%22%0A%0A%0A%0Aimport os%0Aimport sys%0Aimport argparse%0A%0Adef get_args():%0A%09parser = argparse.ArgumentParser(%0A%09%09%09description=%22%22%22Program description%22%22%22)%0A%09parser.add_argument(%0A%09%09%09%22mraic_dir%22,%0A%09%09%09type=str,%0A%09%09%09help=%22%22%22The directory for mraic.pl%22%22%22%0A%09%09)%0A%09parser.add_argument(%0A%09%09%09%22in_dir%22,%0A%09%09%09type=str,%0A%09%09%09help=%22%22%22The output directory%22%22%22%0A%09%09)%0A%09parser.add_argument(%0A%09%09%09%22out_dir%22,%0A%09%09%09type=str,%0A%09%09%09help=%22%22%22The output directory%22%22%22%0A%09%09)%0A%09return parser.parse_args()%0A%09%0A%0Adef main():%0A%09args = get_args()%0A%09outfile = open(%22%7B0%7D/mraic_out.txt%22.format(args.out_dir), 'wb')%0A%09files = list()%0A%09prefiles = os.listdir(%22%7B0%7D%22.format(args.in_dir))%0A%09for prefile in prefiles: # Remove hidden files%0A%09%09if not prefile.startswith('.'):%0A%09%09%09files.append(prefile)%0A%09os.chdir(%22%7B0%7D%22.format(args.mraic_dir))%0A%09for file in files:%0A%09%09os.system(%22perl mraic.pl %7B0%7D/%7B1%7D%22.format(args.in_dir, file))%0A%09%09infile = open(%22%7B0%7D/%7B1%7D.MrAIC.txt%22.format(args.in_dir, file), 'r')%0A%09%09for line in infile:%0A%09%09%09if line.startswith(%22Minimum AICc model:%22):%0A%09%09%09%09parts = line.split()%0A%09%09%09%09outfile.write(%22%7B0%7D%5Ct%7B1%7D%5Cn%22.format(file, parts%5B3%5D))%09%09%09%09%0A%09%09infile.close()%0A%09%09outfile.flush()%0A%09outfile.close()%0A%0Aif __name__ == '__main__':%0A%09main()
2e43441e43942b2f24a89209cfdc56652523901d
Create test_pir.py
test/test_pir.py
test/test_pir.py
Python
0.000051
@@ -0,0 +1,166 @@ +from ...%0A%0Aif __name__ == %22__main__%22:%0A%09try :%0A%09%09pir = PIR()%0A%09%09pir.start()%09%0A%09%09while 1 :%0A%09%09%09print(pir.result())%0A%09except KeyboardInterrupt :%09%0A%09%09pir.stop()%0A%09%09pir.cleanup()%0A
ed2548ca027b4fd062a10ddf2ce359d9115f40a4
add a __main__.py to nuitka works
borg/__main__.py
borg/__main__.py
Python
0.000037
@@ -0,0 +1,39 @@ +from borg.archiver import main%0Amain()%0A%0A
8195278aa5044371c8fa7963be15169209e1b92a
Add new test to call neutron API and check results in dragonflow db.
dragonflow/tests/fullstack/test_neutron_api.py
dragonflow/tests/fullstack/test_neutron_api.py
Python
0.000006
@@ -0,0 +1,2941 @@ +# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom dragonflow.common import common_params%0Afrom dragonflow.common import exceptions as df_exceptions%0Afrom neutron.common import config as common_config%0Afrom neutron.tests import base%0Afrom neutronclient.neutron import client%0Aimport os_client_config%0Afrom oslo_config import cfg%0Afrom oslo_serialization import jsonutils%0Afrom oslo_utils import importutils%0A%0A%0Acfg.CONF.register_opts(common_params.df_opts, 'df')%0A%0A%0Adef get_cloud_config(cloud='devstack-admin'):%0A return os_client_config.OpenStackConfig().get_one_cloud(cloud=cloud)%0A%0A%0Adef credentials(cloud='devstack-admin'):%0A %22%22%22Retrieves credentials to run functional tests%22%22%22%0A return get_cloud_config(cloud=cloud).get_auth_args()%0A%0A%0Aclass TestNeutronAPIandDB(base.BaseTestCase):%0A%0A def setUp(self):%0A super(TestNeutronAPIandDB, self).setUp()%0A creds = credentials()%0A tenant_name = creds%5B'project_name'%5D%0A auth_url = creds%5B'auth_url'%5D + %22/v2.0%22%0A self.neutron = client.Client('2.0', username=creds%5B'username'%5D,%0A password=creds%5B'password'%5D, auth_url=auth_url,%0A tenant_name=tenant_name)%0A self.neutron.format = 'json'%0A common_config.init(%5B'--config-file', '/etc/neutron/neutron.conf'%5D)%0A db_driver_class = importutils.import_class(cfg.CONF.df.nb_db_class)%0A self.db_driver = db_driver_class()%0A self.db_driver.initialize(db_ip=cfg.CONF.df.remote_db_ip,%0A db_port=cfg.CONF.df.remote_db_port)%0A%0A def test_create_network(self):%0A test_network = 'mynetwork1'%0A network = %7B'name': test_network, 'admin_state_up': True%7D%0A network = self.neutron.create_network(%7B'network': network%7D)%0A if not network or not network%5B'network'%5D:%0A self.fail(%22Failed to create network using neutron API%22)%0A network_id = network%5B'network'%5D%5B'id'%5D%0A table = 'lswitch'%0A try:%0A value = self.db_driver.get_key(table, network_id)%0A except df_exceptions.DBKeyNotFound:%0A self.fail(%22Failed to create network using neutron API%22)%0A return%0A value2 = jsonutils.loads(value)%0A if 'external_ids' in value2:%0A if (value2%5B'external_ids'%5D%5B'neutron:network_name'%5D ==%0A test_network):%0A self.neutron.delete_network(network_id)%0A return%0A self.fail(%22Failed to find newly created network in Dragonflow DB%22)%0A
fefe47a03337d072e47b439a940d7b6eeef56b93
Add OpenTracing example
example/opentracing.py
example/opentracing.py
Python
0
@@ -0,0 +1,1055 @@ +# encoding=utf-8%0Aimport opentracing%0Aimport instana%0Aimport time%0A%0A# Loop continuously with a 2 second sleep to generate traces%0Awhile True:%0A entry_span = opentracing.tracer.start_span('universe')%0A%0A entry_span.set_tag('http.method', 'GET')%0A entry_span.set_tag('http.url', '/users')%0A entry_span.set_tag('span.kind', 'entry')%0A%0A intermediate_span = opentracing.tracer.start_span('nebula', child_of=entry_span)%0A intermediate_span.finish()%0A%0A db_span = opentracing.tracer.start_span('black-hole', child_of=entry_span)%0A db_span.set_tag('db.instance', 'users')%0A db_span.set_tag('db.statement', 'SELECT * FROM user_table')%0A db_span.set_tag('db.type', 'mysql')%0A db_span.set_tag('db.user', 'mysql_login')%0A db_span.set_tag('span.kind', 'exit')%0A db_span.finish()%0A%0A intermediate_span = opentracing.tracer.start_span('space-dust', child_of=entry_span)%0A intermediate_span.log_kv(%7B'message': 'All seems ok'%7D)%0A intermediate_span.finish()%0A%0A entry_span.set_tag('http.status_code', 200)%0A entry_span.finish()%0A time.sleep(2)%0A
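Decoded, the example is a compact tour of the OpenTracing span API: a root entry span, children attached with child_of, tags for HTTP/DB metadata, log_kv for events, and finish() to close each span. A condensed, runnable version (opentracing.tracer defaults to a no-op tracer unless instana or another backend replaces it on import, so this executes without an agent):

    import opentracing

    entry_span = opentracing.tracer.start_span("universe")
    entry_span.set_tag("http.method", "GET")
    entry_span.set_tag("span.kind", "entry")

    child = opentracing.tracer.start_span("black-hole", child_of=entry_span)
    child.set_tag("db.type", "mysql")
    child.log_kv({"message": "All seems ok"})
    child.finish()

    entry_span.set_tag("http.status_code", 200)
    entry_span.finish()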
f3448d0b37fdac8976a2cc8e4604a6cb2ea7a4ed
add vsmlib.utils which should have been part of commit 6238f235
vsmlib/utils.py
vsmlib/utils.py
Python
0
@@ -0,0 +1,1363 @@ +def parse_signal_num_file(filename):%0A signal_to_num = %7B%7D%0A vsi_version = -1%0A try:%0A with open(filename) as signal_to_num_file:%0A lines = signal_to_num_file.readlines()%0A for line in lines:%0A line_stripped = line.strip()%0A if vsi_version %3C 0:%0A try:%0A vsi_version = float(line_stripped)%0A except ValueError as err:%0A print(%22failed to parse VSI file version number from %22 %5C%0A %22line: %7B%7D: %7B%7D%22.format(line, err),%0A file=sys.stderr)%0A exit(1)%0A else:%0A try:%0A signal, signum_str = line_stripped.split(%22 %22)%0A signal = signal.strip()%0A signum = int(signum_str.strip())%0A signal_to_num%5Bsignal%5D = signum%0A except ValueError as err:%0A print(%22malformed signal number file line: line: %7B%7D: %22 %5C%0A %22%7B%7D%22.format(line, err), file=sys.stderr)%0A exit(1)%0A except Exception as file_err:%0A print(%22failed to open signal number file: %7B%7D%22.format(file_err),%0A file=sys.stderr)%0A exit(1)%0A%0A return signal_to_num, vsi_version%0A
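The decoded parser treats the first line of the file as a VSI version number and every later line as a "NAME NUMBER" pair, but as committed it writes to sys.stderr without ever importing sys, so its error paths themselves raise NameError. A trimmed sketch of the same parse with exceptions propagated instead of exit(1):

    def parse_signal_num_file(filename):
        # Returns ({signal_name: number}, vsi_version).
        signal_to_num = {}
        vsi_version = -1.0
        with open(filename) as fh:
            for line in fh:
                line = line.strip()
                if not line:
                    continue  # the committed version would crash on blank lines
                if vsi_version < 0:
                    vsi_version = float(line)  # first line carries the version
                else:
                    signal, num = line.split()
                    signal_to_num[signal] = int(num)
        return signal_to_num, vsi_version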
8bd66387ba5cd50dc0b545dc7b627792ed601faa
Add test
tests/context.py
tests/context.py
Python
0.000005
@@ -0,0 +1,77 @@ +import os%0Aimport sys%0Asys.path.insert(0, os.path.abspath('..'))%0A%0Aimport gaend%0A
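Decoded, tests/context.py is the usual test-bootstrap idiom: push the project root onto sys.path so the package imports without being installed. Note that the committed abspath('..') resolves relative to the current working directory rather than the test file; os.path.dirname(__file__) is the more robust anchor. A sibling test module would then use it like this (the test file name is hypothetical):

    # tests/test_basic.py -- hypothetical consumer of the context module
    from context import gaend  # context.py has already fixed up sys.path

    def test_import():
        assert gaend is not None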
59e546ae5afe22aab967e5376c8799e29ccbd86a
Add the basic version of my file comparison script
directoryFileContentCmp.py
directoryFileContentCmp.py
Python
0
@@ -0,0 +1,1829 @@ +#! /usr/env/python%0Aimport os%0Aimport hashlib%0Aimport sys%0A%0Abufsize = 65536%0A# Path1 = '/Users/kirkchambers/Desktop'%0A# Path2 = '/Users/kirkchambers/DataSets'%0A%0Adef generate_file_digests_for(path):%0A%09path_set = set()%0A%09for item in os.walk(path):%0A%09%09(directory, _subdirectories, files) = item%0A%09%09for file in files:%0A%09%09%09if (file%5B0%5D == '.'):%0A%09%09%09%09continue%0A%09%09%09else:%0A%09%09%09%09fqFilename = os.path.join(directory, file)%0A%09%09%09%09path_set.add(generate_file_digest(fqFilename, file))%0A%09return path_set%0A%0A%0Adef generate_file_digest(fqFilename, shortFilename):%0A%09hasher = hashlib.md5()%0A%09with open(fqFilename, 'rb') as filestream:%0A%09%09fileBuffer = filestream.read(bufsize)%0A%09%09while len(fileBuffer) %3E 0:%0A%09%09%09hasher.update(fileBuffer)%0A%09%09%09fileBuffer = filestream.read(bufsize)%0A%09# return %22Filename:%7Bfile%7D%5CnHash:%7Bhash%7D%5CnSize:%7Bsize%7D%5Cn%22.format(file=fqFilename, hash=hasher.hexdigest(), size=os.path.getsize(fqFilename))%0A%09return (hasher.hexdigest(), fqFilename, os.path.getsize(fqFilename))%0A%0A%0Adef usage():%0A%09print %22file_list.py directory1 directory2%22%0A%09print %22Prints out the files present in directory1 which are NOT present in directory2%22%0A%0A%0Aif __name__ == %22__main__%22:%0A%09try:%0A%09%09(_command, Path1, Path2) = sys.argv%0A%09except:%0A%09%09usage()%0A%09%09exit(1)%0A%0A%09path_set_1 = generate_file_digests_for(Path1)%0A%09path_set_2 = generate_file_digests_for(Path2)%0A%0A%09# union = path_set_1 %7C path_set_2%0A%09set_1_exclusives = path_set_1 - path_set_2%0A%09# set_2_exclusives = path_set_2 - path_set_1%0A%09# print %22length of 1: %7B%7D%22.format(len(path_set_1))%0A%09# print %22length of 2: %7B%7D%22.format(len(path_set_2))%0A%09# print %22length of union: %7B%7D%22.format(len(union))%0A%09# print %22length of set1 uniqs: %7B%7D%22.format(len(set_1_exclusives))%0A%09# print %22length of set2 uniqs: %7B%7D%22.format(len(set_2_exclusives))%0A%0A%09print %22Files present in %7Bpath1%7D and not in %7Bpath2%7D:%22.format(path1=Path1, path2=Path2)%0A%09for item in set_1_exclusives:%0A%09%09print item%5B1%5D
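The decoded script is Python 2 (print statements, bare except) and builds a set of (md5, path, size) tuples per tree — and because the tuple includes the path, identical content stored under different names is still reported as "missing" from the other tree. A Python 3 sketch of the same buffered-MD5 walk that compares by digest alone:

    import hashlib
    import os
    import sys

    BUFSIZE = 65536

    def file_digest(path):
        # Stream the file through MD5 in BUFSIZE chunks.
        hasher = hashlib.md5()
        with open(path, "rb") as fh:
            for chunk in iter(lambda: fh.read(BUFSIZE), b""):
                hasher.update(chunk)
        return hasher.hexdigest()

    def digests_for(root):
        # digest -> one representative path; hidden files skipped as in the original.
        out = {}
        for directory, _subdirs, files in os.walk(root):
            for name in files:
                if not name.startswith("."):
                    full = os.path.join(directory, name)
                    out[file_digest(full)] = full
        return out

    if __name__ == "__main__":
        left, right = digests_for(sys.argv[1]), digests_for(sys.argv[2])
        for digest in sorted(set(left) - set(right)):
            print(left[digest])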
345758259d9ee80826758373c3970db1c28a870b
Bump development version
djangocms_blog/__init__.py
djangocms_blog/__init__.py
__version__ = '0.3.a2'
Python
0
@@ -17,7 +17,7 @@ .3.a -2 +3 '%0A
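Unlike the file-creation records above, this hunk is character-indexed, not line-indexed (the +1,39-style counts elsewhere in this dump are character counts as well): at offset 17, around the context ".3.a", the character "2" is removed and "3" added. Decoded, the change to djangocms_blog/__init__.py is simply:

    __version__ = '0.3.a2'   # before
    __version__ = '0.3.a3'   # after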
4af2a6a62e4be78bd20550c3ae5089c51b4fec62
add separate function for pagination
students/utils.py
students/utils.py
Python
0.000007
@@ -0,0 +1,1198 @@ +from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger%0A%0A%0Adef paginate(objects, size, request, context, var_name='object_list'):%0A %22%22%22Paginate objects provided by view.%0A%0A This function takes:%0A * list of elements;%0A * number of objects per page;%0A * request object to get url parameters from;%0A * context to set new variables into;%0A * var_name - variable name for list of objects.%0A%0A It returns updated context object.%0A %22%22%22%0A%0A # apply pagination%0A paginator = Paginator(objects, size)%0A%0A # try to get page number from request%0A page = request.GET.get('page', '1')%0A try:%0A object_list = paginator.page(page)%0A except PageNotAnInteger:%0A # if page is not an integer, deliver first page%0A object_list = paginator.page(1)%0A except EmptyPage:%0A # if page is out of range (e.g. 9999),%0A # deliver last page of results%0A object_list = paginator.page(paginator.num_pages)%0A%0A # set variables into context%0A context%5Bvar_name%5D = object_list%0A context%5B'is_paginated'%5D = object_list.has_other_pages()%0A context%5B'page_obj'%5D = object_list%0A context%5B'paginator'%5D = paginator%0A%0A return context%0A
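The decoded helper wraps Django's Paginator with the two standard fallbacks (non-integer page -> first page, out-of-range page -> last page) and injects object_list, is_paginated, page_obj and paginator into the context, so function-based views stay one-liners. A sketch of a calling view (the view, model and template names are hypothetical):

    from django.shortcuts import render

    from students.models import Student      # hypothetical model
    from students.utils import paginate

    def students_list(request):
        students = Student.objects.order_by("last_name")
        context = paginate(students, 10, request, {}, var_name="students")
        return render(request, "students/students_list.html", context)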