Dataset schema (the rows below list each record's fields in this order; empty
old_contents fields are omitted from a row):

  commit        string, exactly 40 chars (git commit SHA-1)
  subject       string, 1 to 3.25k chars (commit subject line)
  old_file      string, 4 to 311 chars (file path before the change)
  new_file      string, 4 to 311 chars (file path after the change)
  old_contents  string, 0 to 26.3k chars (file contents before the change)
  lang          string, one of 3 classes
  proba         float64, 0 to 1 (a probability score; the header does not say
                what it scores)
  diff          string, 0 to 7.82k chars (percent-encoded diff of the change)
2a6527c60d09c0cbb2f1902b57ae02ddade213eb
Create communicati.py
libs/communicati.py
libs/communicati.py
Python
0.000001
@@ -0,0 +1,1012 @@ +# communications.py%0A# M%C3%B3nica Mil%C3%A1n (@mncmilan)%0A# [email protected]%0A# http://steelhummingbird.blogspot.com.es/%0A%0A# Library that contains all necessary methods in order to enable communications between PC and eZ430-Chronos.%0A%0Aimport serial%0A%0As = serial.Serial('COM4', baudrate=115200,timeout=None) # open serial port%0A%0A%0Aclass CommunicationManager():%0A%0A def open_serial_port(self):%0A s.write(bytearray(%5B255, 7, 3%5D)) # starting communications with serial port%0A%0A def send_data_request(self):%0A s.write(bytearray(%5B255, 8, 7, 0, 0, 0, 0%5D)) # acceleration data request%0A bytesToRead = s.inWaiting()%0A return bytesToRead%0A%0A def read_from_labVIEW_request(self):%0A bytes_to_read = s.inWaiting()%0A inbyte = s.read(bytes_to_read)%0A return bytes_to_read, inbyte%0A%0A def read_data(self, bytes_to_read):%0A inbyte = s.read(bytes_to_read)%0A return inbyte%0A%0A def close_serial_port(self):%0A s.write(bytearray(%5B255, 9, 3%5D)) # stop transmitting%0A s.close()%0A
e5293f7e33740f210ab58c3c05db18829db1474d
add docstrings [skip ci]
mailthon/helpers.py
mailthon/helpers.py
""" mailthon.helpers ~~~~~~~~~~~~~~~~ Implements various helper functions/utilities. :copyright: (c) 2015 by Eeo Jun :license: MIT, see LICENSE for details. """ import sys import mimetypes from collections import MutableMapping from email.utils import formataddr if sys.version_info[0] == 3: bytes_type = bytes else: bytes_type = str def guess(filename, fallback='application/octet-stream'): """ Using the mimetypes library, guess the mimetype and encoding for a given *filename*. If the mimetype cannot be guessed, *fallback* is assumed instead. :param filename: Filename- can be absolute path. :param fallback: A fallback mimetype. """ guessed, encoding = mimetypes.guess_type(filename, strict=False) if guessed is None: return fallback, encoding return guessed, encoding def format_addresses(addrs): """ Given an iterable of addresses or name-address tuples *addrs*, return a header value that joins all of them together with a space and a comma. """ return ', '.join( formataddr(item) if isinstance(item, tuple) else item for item in addrs ) def encode_address(addr, encoding='utf-8'): """ Given an email address *addr*, try to encode it with ASCII. If it's not possible, encode the *local-part* with the *encoding* and the *domain* with IDNA. """ if isinstance(addr, bytes_type): return addr try: addr = addr.encode('ascii') except UnicodeEncodeError: if '@' in addr: localpart, domain = addr.split('@', 1) addr = b'@'.join([ localpart.encode(encoding), domain.encode('idna'), ]) else: addr = addr.encode(encoding) return addr class UnicodeDict(dict): def __init__(self, values=(), encoding='utf-8'): dict.__init__(self) self.encoding = encoding self.update(values) def __setitem__(self, key, value): if isinstance(value, bytes_type): value = value.decode(self.encoding) dict.__setitem__(self, key, value) update = MutableMapping.update
Python
0
@@ -1836,16 +1836,268 @@ (dict):%0A + %22%22%22%0A A dictionary that handles unicode values%0A magically - that is, byte-values are%0A automatically decoded. Accepts a dict%0A or iterable *values*.%0A%0A :param encoding: Default encoding used%0A if no encoding is specified.%0A %22%22%22%0A%0A def
ea652c892219d1ed08a0453a3b2ede3efb452e23
Create __init__.py
ui_techmenu/__init__.py
ui_techmenu/__init__.py
Python
0.000429
@@ -0,0 +1,1744 @@ +# -*- coding: utf-8 -*-%0A%0A######################################################################%0A#%0A# ui_techmenu - Explode Technical Menu for Odoo %0A# Copyright (C) 2012 - TODAY, Ursa Information Systems (%3Chttp://ursainfosystems.com%3E)%0A# Copyright (C) 2004-2010 Tiny SPRL (%3Chttp://tiny.be%3E)%0A# [email protected]%0A#%0A#%0A# Ursa is willing to revoke copyright claims in the future if Odoo wishes to certify this module.%0A#%0A######################################################################%0A#%0A# This program is free software: you can redistribute it and/or modify it under the terms of the GNU Affero General Public License %0A# as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.%0A#%0A# For clients with an annual support contract with Ursa, this program is warranted within the guidelines of that contract.%0A#%0A# For ALL OTHERS, this program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY (including the absence %0A# of implied warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE). See the GNU Affero General Public License for %0A# more information.%0A#%0A######################################################################%0A#%0A# You should have received a copy of the GNU Affero General Public License along with this program. The license is in the file %0A# named LICENSE in the top level directory and also provided on the web at %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A######################################################################%0A%0A# python dependencies (either files or classes) are designated below %0A# import %3Cfile_dependency%3E%0A# import %3Cclass_dependency%3E%0A%0A# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:%0A
841e8fe236eab35b803cb9d8bec201306ce4642e
Add script to generate big RUM_* files
util/repeat_rum_file.py
util/repeat_rum_file.py
Python
0
@@ -0,0 +1,512 @@ +from rum_mapping_stats import aln_iter%0Aimport argparse%0Aimport sys%0A%0Aparser = argparse.ArgumentParser()%0A%0Aparser.add_argument('--times', type=int)%0Aparser.add_argument('--max-seq', type=int)%0Aparser.add_argument('rum_file', type=file)%0A%0Aargs = parser.parse_args()%0A%0Aalns = list(aln_iter(args.rum_file))%0A%0Afor t in range(args.times):%0A for aln in alns:%0A old_read_num = aln.read_num%0A aln.read_num = old_read_num + t * args.max_seq%0A aln.write(sys.stdout)%0A aln.read_num = old_read_num%0A %0A
3ab98baaf2b81ffa1afef808f27608f06bc946d3
Create commands.py
web/commands.py
web/commands.py
Python
0.000011
@@ -0,0 +1,1507 @@ +#%0A# Commands for RPC interface%0A#%0A%0Afrom twisted.protocols.amp import AMP, Boolean, Integer, String, Float, Command%0A%0Aclass Sum(Command):%0A arguments = %5B('a', Integer()),%0A ('b', Integer())%5D%0A response = %5B('status', Integer())%5D%0A%0Aclass HeartbeatCmd(Command):%0A arguments = %5B('enabled', Boolean())%5D%0A response = %5B('status', Boolean())%5D%0A requiresAnswer = False%0A%0Aclass HaltCmd(Command):%0A arguments = %5B%5D%0A response = %5B('status', Boolean())%5D%0A requiresAnswer = False%0A%0Aclass ModeCmd(Command):%0A arguments = %5B('mode', String())%5D%0A response = %5B('status', String())%5D%0A requiresAnswer = False%0A%0Aclass QueryStatus(Command):%0A arguments = %5B%5D%0A response = %5B('fix', Boolean()),%0A ('lat', Float()),%0A ('lon', Float()),%0A ('gps_heading', Float()),%0A ('gps_speed', Float()),%0A ('altitude', Float()),%0A ('num_sat', Integer()),%0A ('timestamp', String()),%0A ('datestamp', String()),%0A ('compass_heading', Float()),%0A ('temperature', Float())%5D%0A%0Aclass NavigationCmd(Command):%0A arguments = %5B('speed', Float()), ('heading', Float())%5D%0A response = %5B('status', Boolean())%5D%0A requiresAnswer = False%0A%0Aclass ManualDriveCmd(Command):%0A arguments = %5B('throttle', Float()), ('steering', Float())%5D%0A response = %5B('status', Boolean())%5D%0A requiresAnswer = False%0A%0Aclass ExitCmd(Command):%0A arguments = %5B%5D%0A response = %5B%5D%0A requiresAnswer = False%0A
b5f8299cbe539cf2a01988ca25e0c7638400bc8c
Create stuff.py
bottomline/stuff.py
bottomline/stuff.py
Python
0.000001
@@ -0,0 +1,29 @@ +# Testing%0Aprint 'heck yeah!'%0A
5777877d1ed71ed21f67e096b08ad495ff844ed8
add testexample/simpleTestwithPython.py
testexample/simpleTestwithPython.py
testexample/simpleTestwithPython.py
Python
0.000001
@@ -0,0 +1,1800 @@ +import os%0Aimport re%0Aimport json%0Aimport sys%0Aimport getopt%0Aimport argparse%0Afrom docopt import docopt%0Afrom urllib2 import urlopen, Request%0Aimport urllib%0Aimport urllib2%0Aimport requests%0A%0Aurl_phenotypes = 'http://localhost:9000/api/phenotypes'%0Aurl_genotypes = 'http://localhost:9000/api/genotypes'%0Atoken = 'Bearer eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJJRCI6Ik5JQUdBRFMiLCJleHAiOjE0NjEzNjI0NTV9.-Roix0YvuPy9VHaWm9wE83yB7NiSunyVXsVlR74lu2Y'%0Aheaders = %7B'Authorization': '%25s' %25 token%7D%0Arequest_phenotypes = Request(url_phenotypes, headers=headers)%0Arequest_genotypes = Request(url_genotypes, headers=headers)%0A%0Aresponse_phenotypes = urlopen(request_phenotypes)%0Aresponse_genotypes = urlopen(request_genotypes)%0A%0Adata_phenotypes = json.loads(response_phenotypes.read())%0Adata_genotypes = json.loads(response_genotypes.read())%0A%0A%0Adef loadPhenotypes(data_phenotypes):%0A phenotypes_list = data_phenotypes%5B'phenotypes'%5D%0A for phenotype in phenotypes_list:%0A print(phenotype%5B'title'%5D)%0A print(phenotype%5B'family_id'%5D)%0A print(phenotype%5B'individual_id'%5D)%0A print(phenotype%5B'paternal_id'%5D)%0A print(phenotype%5B'maternal_id'%5D)%0A%0A%0Adef loadGenotypes(data_genotypes):%0A genotypes_list = data_genotypes%5B'genotypes'%5D%0A for genotype in genotypes_list:%0A print(genotype%5B'title'%5D)%0A print(genotype%5B'chr'%5D)%0A print(genotype%5B'coordinate'%5D)%0A print(genotype%5B'variant_id'%5D)%0A%0Adef postGenotypes(url_genotypes, token, headers):%0A values = %7B%22title%22:%22test%22,%22chr%22:%222%22,%22variant_id%22:%22snp4%22,%22location%22:%220%22,%22coordinate%22:%221111830%22,%22call%22:%22G T G T G G T T G T T T%22%7D%0A data = json.dumps(values)%0A req = requests.post(url_genotypes, data, headers=headers)%0A print req.status_code%0A%0A%0AloadPhenotypes(data_phenotypes)%0AloadGenotypes(data_genotypes)%0ApostGenotypes(url_genotypes, token, headers)%0A
606020fbb7c3e608c8eab19ca143919003ea4f7d
add some first tests.
test_triptan.py
test_triptan.py
Python
0
@@ -0,0 +1,1756 @@ +import os%0Afrom unittest import TestCase%0Afrom tempfile import TemporaryDirectory%0A%0Afrom triptan.core import Triptan%0A%0A%0Aclass TriptanInitializationTest(TestCase):%0A %22%22%22%0A Asserts that triptan can setup the necessary data correctly.%0A %22%22%22%0A%0A def test_init_file_structure(self):%0A %22%22%22%0A Assert the file structure is created correctly.%0A %22%22%22%0A with TemporaryDirectory() as tmpd:%0A Triptan.setup(%0A tmpd,%0A 'triptan.yml',%0A %7B'revisions_location': 'revisions'%7D%0A )%0A assert os.path.exists(os.path.join(tmpd, 'triptan.yml'))%0A assert os.path.exists(os.path.join(tmpd, 'revisions'))%0A%0A%0Aclass TriptanTest(TestCase):%0A %22%22%22%0A Assert the core functionality is working.%0A %22%22%22%0A%0A def setUp(self):%0A %22%22%22%0A Create a temporary directory and set triptan up with it.%0A %22%22%22%0A self.path = TemporaryDirectory()%0A Triptan.setup(%0A self.path.name,%0A 'triptan.yml',%0A %7B'revisions_location': 'revisions'%7D%0A )%0A self.triptan = Triptan(self.path.name, 'triptan.yml')%0A%0A def test_default_revision(self):%0A %22%22%22%0A Assert the default revision is -1.%0A %22%22%22%0A assert self.triptan.current_revision == -1%0A%0A def test_revision_creation(self):%0A %22%22%22%0A Assert that revisions are correctly created.%0A %22%22%22%0A self.triptan.new_revision(%22test revision%22)%0A rev_path = os.path.join(self.path.name, 'revisions/revision-000.py')%0A assert os.path.exists(rev_path)%0A%0A self.triptan.new_revision(%22another%22)%0A rev_path = os.path.join(self.path.name, 'revisions/revision-001.py')%0A assert os.path.exists(rev_path)%0A
1803ec42e2eaad689dd51d3afb0b943e411f10d5
Add breath first search algorithm
breath_first_search/breath_first_search.py
breath_first_search/breath_first_search.py
Python
0.000049
@@ -0,0 +1,2616 @@ +#!/usr/bin/env python%0A%0Afrom collections import deque%0A%0A%0Aclass BreathFirstSearchGame(object):%0A def __init__(self):%0A # The node index are from 0 to 7, such as 0, 1, 2, 3, 4%0A self.node_number = 8%0A%0A # The edges to connect each node%0A self.edges = %5B(0, 1), (0, 3), (1, 2), (1, 5), (2, 7), (3, 4), (3, 6),%0A (4, 5), (5, 7)%5D%0A%0A # The 8 * 8 matrix of boolean values, only updated by the edges%0A self.graph = %5B%5BFalse for j in range(self.node_number)%5D%0A for i in range(self.node_number)%5D%0A #print(self.graph)%0A%0A # The queue of open set, which is an array%0A self.open_set = deque()%0A%0A # The source and destination nodes for this game%0A self.source_node = 0%0A self.destination_node = 7%0A%0A # The 8 array of boolean which means this node is visited%0A self.is_visit_node_array = %5BFalse for i in range(self.node_number)%5D%0A%0A # The 8 array of int which means this node's best parent node id%0A self.best_parent_node_array = %5B-1 for i in range(self.node_number)%5D%0A%0A self.initialize_internal_variables()%0A #print(self.graph)%0A%0A self.travel_and_update_variables()%0A%0A self.travel_desination_path(self.destination_node)%0A%0A def initialize_internal_variables(self):%0A # Update the graph with edges%0A for i, j in self.edges:%0A self.graph%5Bi%5D%5Bj%5D = True%0A self.graph%5Bj%5D%5Bi%5D = True%0A%0A # Update the open set with the source nodes%0A self.open_set.append(self.source_node)%0A self.is_visit_node_array%5Bself.source_node%5D = True%0A self.best_parent_node_array%5Bself.source_node%5D = self.source_node%0A%0A def travel_and_update_variables(self):%0A # Travel if some nodes in open set%0A while len(self.open_set) %3E 0:%0A%0A current_node = self.open_set.popleft()%0A%0A for other_node in range(self.node_number):%0A%0A #import ipdb;ipdb.set_trace()%0A # Check if these two nodes are connected%0A if self.graph%5Bcurrent_node%5D%5Bother_node%5D:%0A%0A # Check if the other node is visited%0A if self.is_visit_node_array%5Bother_node%5D == False:%0A%0A # Update the open set and visited array%0A self.open_set.append(other_node)%0A self.best_parent_node_array%5Bother_node%5D = current_node%0A self.is_visit_node_array%5Bother_node%5D = True%0A%0A def travel_desination_path(self, destination_node):%0A%0A if destination_node == self.source_node:%0A print(destination_node)%0A%0A else:%0A self.travel_desination_path(%0A self.best_parent_node_array%5Bdestination_node%5D)%0A print(destination_node)%0A%0A%0Adef main():%0A print(%22Start breath first search%22)%0A%0A game = BreathFirstSearchGame()%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
c98eff8545c90563246a53994fe8f65faaf76b0a
Add fetch recipe for the open source infra repo.
recipes/infra.py
recipes/infra.py
Python
0.00002
@@ -0,0 +1,969 @@ +# Copyright 2014 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0Aimport sys%0A%0Aimport recipe_util # pylint: disable=F0401%0A%0A%0A# This class doesn't need an __init__ method, so we disable the warning%0A# pylint: disable=W0232%0Aclass Infra(recipe_util.Recipe):%0A %22%22%22Basic Recipe class for the Infrastructure repositories.%22%22%22%0A%0A @staticmethod%0A def fetch_spec(props):%0A url = 'https://chromium.googlesource.com/infra/infra.git'%0A solution = %7B%0A 'name' : 'infra',%0A 'url' : url,%0A 'deps_file': 'DEPS',%0A 'managed' : False,%0A %7D%0A spec = %7B%0A 'solutions': %5Bsolution%5D,%0A %7D%0A return %7B%0A 'type': 'gclient_git',%0A 'gclient_git_spec': spec,%0A %7D%0A%0A @staticmethod%0A def expected_root(_props):%0A return 'infra'%0A%0A%0Adef main(argv=None):%0A return Infra().handle_args(argv)%0A%0A%0Aif __name__ == '__main__':%0A sys.exit(main(sys.argv))%0A
5b3b5bb145eea8a71c81a383d2bdac7ecf13f98e
Add sys module tests
tests/integration/modules/sysmod.py
tests/integration/modules/sysmod.py
Python
0.000001
@@ -0,0 +1,619 @@ +# Import python libs%0Aimport os%0A%0A# Import salt libs%0Aimport integration%0A%0Aclass SysModuleTest(integration.ModuleCase):%0A '''%0A Validate the sys module%0A '''%0A def test_list_functions(self):%0A '''%0A sys.list_functions%0A '''%0A funcs = self.run_function('sys.list_functions')%0A self.assertTrue('hosts.list_hosts' in funcs)%0A self.assertTrue('pkg.install' in funcs)%0A%0A def test_list_modules(self):%0A '''%0A sys.list_moduels%0A '''%0A mods = self.run_function('sys.list_modules')%0A self.assertTrue('hosts' in mods)%0A self.assertTrue('pkg' in mods)%0A
7d800a0fc2d94cad14e825faa27e1f5b2d2cbed8
Create new package (#6648)
var/spack/repos/builtin/packages/breseq/package.py
var/spack/repos/builtin/packages/breseq/package.py
Python
0
@@ -0,0 +1,1885 @@ +##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass Breseq(AutotoolsPackage):%0A %22%22%22breseq is a computational pipeline for finding mutations relative to a%0A reference sequence in short-read DNA re-sequencing data for haploid%0A microbial-sized genomes.%22%22%22%0A%0A homepage = %22http://barricklab.org/breseq%22%0A url = %22https://github.com/barricklab/breseq/archive/v0.31.1.tar.gz%22%0A%0A version('0.31.1', 'a4e602d5481f8692833ba3d5a3cd0394')%0A%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('m4', type='build')%0A%0A depends_on('bedtools2', type='run')%0A depends_on('r', type='run')%0A
19cf7a2833ba2ffcff46bd4543ed93fd80c1d8ea
fix trying to run configure on an already configured directory fixes #2959 (#2961)
var/spack/repos/builtin/packages/libmng/package.py
var/spack/repos/builtin/packages/libmng/package.py
##############################################################################
# Copyright (c) 2013-2016, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *


class Libmng(AutotoolsPackage):
    """libmng -THE reference library for reading, displaying, writing
    and examining Multiple-Image Network Graphics. MNG is the animation
    extension to the popular PNG image-format."""

    homepage = "http://sourceforge.net/projects/libmng/"
    url = "http://downloads.sourceforge.net/project/libmng/libmng-devel/2.0.2/libmng-2.0.2.tar.gz"

    version('2.0.2', '1ffefaed4aac98475ee6267422cbca55')

    depends_on("jpeg")
    depends_on("zlib")
    depends_on("lcms")

    def patch(self):
        # jpeg requires stdio to beincluded before its headrs.
        filter_file(r'^(\#include \<jpeglib\.h\>)',
                    '#include<stdio.h>\n\\1',
                    'libmng_types.h')
Python
0
@@ -1958,8 +1958,105 @@ pes.h')%0A +%0A @run_before('configure')%0A def clean_configure_directory(self):%0A make('distclean')%0A
94f2ea927d9e218f2b5065456275d407164ddf0a
Add anidub.com tracker support
updatorr/tracker_handlers/handler_anidub.py
updatorr/tracker_handlers/handler_anidub.py
Python
0
@@ -0,0 +1,3303 @@ +from updatorr.handler_base import BaseTrackerHandler%0Afrom updatorr.utils import register_tracker_handler%0Aimport urllib2%0A%0A%0Aclass AnidubHandler(BaseTrackerHandler):%0A %22%22%22This class implements .torrent files downloads%0A for http://tr.anidub.com tracker.%22%22%22%0A%0A logged_in = False%0A # Stores a number of login attempts to prevent recursion.%0A login_counter = 0%0A%0A def get_torrent_file(self):%0A %22%22%22This is the main method which returns%0A a filepath to the downloaded file.%22%22%22%0A torrent_file = None%0A download_link = self.get_download_link()%0A if download_link is None:%0A self.dump_error('Cannot find torrent file download link at %25s' %25 self.resource_url)%0A else:%0A self.debug('Torrent download link found: %25s' %25 download_link)%0A torrent_file = self.download_torrent(download_link)%0A return torrent_file%0A%0A def get_id_from_link(self):%0A %22%22%22Returns forum thread identifier from full thread URL.%22%22%22%0A return self.resource_url.split('=')%5B1%5D%0A%0A def login(self, login, password):%0A %22%22%22Implements tracker login procedure.%22%22%22%0A self.logged_in = False%0A%0A if login is None or password is None:%0A return False%0A%0A self.login_counter += 1%0A%0A # No recursion wanted.%0A if self.login_counter %3E 1:%0A return False%0A%0A login_url = 'http://tr.anidub.com/takelogin.php'%0A self.debug('Trying to login at %25s ...' %25 login_url)%0A form_data = %7B%0A 'username': login,%0A 'password': password,%0A %7D%0A self.get_resource(login_url, form_data)%0A cookies = self.get_cookies()%0A%0A # Login success check.%0A if cookies.get('uid') is not None:%0A self.logged_in = True%0A return self.logged_in%0A%0A def get_download_link(self):%0A %22%22%22Tries to find .torrent file download link at forum thread page%0A and return that one.%22%22%22%0A response, page_html = self.get_resource(self.resource_url)%0A page_links = self.find_links(page_html)%0A download_link = None%0A for page_link in page_links:%0A if 'login.php?returnto=' in page_link:%0A download_link = None%0A self.debug('Login is required to download torrent file.')%0A if self.login(self.get_settings('login'), self.get_settings('password')):%0A download_link = self.get_download_link()%0A if 'download.php?id=' in page_link:%0A download_link = 'http://tr.anidub.com/'+urllib2.unquote(page_link).replace(%22&amp;%22, %22&%22)%0A return download_link%0A%0A def download_torrent(self, url):%0A %22%22%22Gets .torrent file contents from given URL and%0A stores that in a temporary file within a filesystem.%0A Returns a path to that file.%0A%0A %22%22%22%0A self.debug('Downloading torrent file from %25s ...' %25 url)%0A # That was a check that user himself visited torrent's page ;)%0A cookies = self.get_cookies()%0A #self.set_cookie('uid', self.get_id_from_link())%0A contents = self.get_resource(url, %7B%7D)%5B1%5D%0A return self.store_tmp_torrent(contents)%0A%0A%0A# With that one we tell updatetorr to handle links to %60rutracker.org%60 domain with RutrackerHandler class.%0Aregister_tracker_handler('tr.anidub.com', AnidubHandler)%0A
2b6456e5724f62b7b170252a6bf7d076768d1167
Fix broken Hypothesis test
tests/test_metasync.py
tests/test_metasync.py
# -*- coding: utf-8 -*-

from hypothesis import given
import hypothesis.strategies as st
import pytest

from vdirsyncer.metasync import MetaSyncConflict, metasync
from vdirsyncer.storage.base import normalize_meta_value
from vdirsyncer.storage.memory import MemoryStorage

from . import blow_up


def test_irrelevant_status():
    a = MemoryStorage()
    b = MemoryStorage()
    status = {'foo': 'bar'}
    metasync(a, b, status, keys=())
    assert not status


def test_basic(monkeypatch):
    a = MemoryStorage()
    b = MemoryStorage()
    status = {}
    a.set_meta('foo', 'bar')
    metasync(a, b, status, keys=['foo'])
    assert a.get_meta('foo') == b.get_meta('foo') == 'bar'

    a.set_meta('foo', 'baz')
    metasync(a, b, status, keys=['foo'])
    assert a.get_meta('foo') == b.get_meta('foo') == 'baz'

    monkeypatch.setattr(a, 'set_meta', blow_up)
    monkeypatch.setattr(b, 'set_meta', blow_up)
    metasync(a, b, status, keys=['foo'])
    assert a.get_meta('foo') == b.get_meta('foo') == 'baz'
    monkeypatch.undo()
    monkeypatch.undo()

    b.set_meta('foo', None)
    metasync(a, b, status, keys=['foo'])
    assert not a.get_meta('foo') and not b.get_meta('foo')


def test_conflict():
    a = MemoryStorage()
    b = MemoryStorage()
    status = {}
    a.set_meta('foo', 'bar')
    b.set_meta('foo', 'baz')
    with pytest.raises(MetaSyncConflict):
        metasync(a, b, status, keys=['foo'])
    assert a.get_meta('foo') == 'bar'
    assert b.get_meta('foo') == 'baz'
    assert not status


def test_conflict_same_content():
    a = MemoryStorage()
    b = MemoryStorage()
    status = {}
    a.set_meta('foo', 'bar')
    b.set_meta('foo', 'bar')
    metasync(a, b, status, keys=['foo'])
    assert a.get_meta('foo') == b.get_meta('foo') == status['foo'] == 'bar'


@pytest.mark.parametrize('wins', 'ab')
def test_conflict_x_wins(wins):
    a = MemoryStorage()
    b = MemoryStorage()
    status = {}
    a.set_meta('foo', 'bar')
    b.set_meta('foo', 'baz')
    metasync(a, b, status, keys=['foo'],
             conflict_resolution='a wins' if wins == 'a' else 'b wins')

    assert a.get_meta('foo') == b.get_meta('foo') == status['foo'] == (
        'bar' if wins == 'a' else 'baz'
    )


keys = st.text(min_size=1).filter(lambda x: x.strip() == x)
values = st.text().filter(lambda x: normalize_meta_value(x) == x)
metadata = st.dictionaries(keys, values)


@given(
    a=metadata, b=metadata, status=metadata,
    keys=st.sets(keys),
    conflict_resolution=st.just('a wins') | st.just('b wins')
)
def test_fuzzing(a, b, status, keys, conflict_resolution):
    def _get_storage(m, instance_name):
        s = MemoryStorage(instance_name=instance_name)
        s.metadata = m
        return s

    a = _get_storage(a, 'A')
    b = _get_storage(b, 'B')

    winning_storage = (a if conflict_resolution == 'a wins' else b)
    expected_values = dict((key, winning_storage.get_meta(key))
                           for key in keys)

    metasync(a, b, status,
             keys=keys, conflict_resolution=conflict_resolution)

    for key in keys:
        assert a.get_meta(key) == b.get_meta(key) == status.get(key, '')
        if expected_values[key]:
            assert status[key] == expected_values[key]
Python
0.000151
@@ -46,16 +46,25 @@ rt given +, example %0Aimport @@ -2540,16 +2540,120 @@ ins')%0A)%0A +@example(a=%7Bu'0': u'0'%7D, b=%7B%7D, status=%7Bu'0': u'0'%7D, keys=%7Bu'0'%7D,%0A conflict_resolution='a wins')%0A def test @@ -3189,16 +3189,48 @@ n keys:%0A + s = status.get(key, '')%0A @@ -3279,93 +3279,56 @@ == s -tatus.get(key, '')%0A if expected_values%5Bkey%5D:%0A assert status%5Bkey%5D == +%0A assert s == expected_values%5Bkey%5D or not exp @@ -3344,9 +3344,18 @@ ues%5Bkey%5D + or not s %0A
b02ec9a16689bf2814e85f0edb01c7f4a5926214
Add pre-migration script for account module.
addons/account/migrations/8.0.1.1/pre-migration.py
addons/account/migrations/8.0.1.1/pre-migration.py
Python
0
@@ -0,0 +1,1375 @@ +# -*- coding: utf-8 -*-%0A##############################################################################%0A#%0A# Copyright (C) 2014 Akretion (http://www.akretion.com/)%0A# @author: Alexis de Lattre %[email protected]%3E%0A#%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as%0A# published by the Free Software Foundation, either version 3 of the%0A# License, or (at your option) any later version.%0A#%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A#%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A#%0A##############################################################################%0A%0Afrom openerp.openupgrade import openupgrade%0A%0A%0Adef migrate(cr, version):%0A if not version:%0A return%0A%0A cr.execute(%0A %22%22%22SELECT id FROM account_analytic_journal WHERE type='purchase' %22%22%22)%0A res = cr.fetchone()%0A print %22mig account res=%22, res%0A if res:%0A openupgrade.add_xmlid(%0A cr, 'account', 'exp', 'account.analytic.journal', res%5B0%5D, True)%0A
07841312d062fd0dd48baa0d3bc0d92989e05841
add script mp3-file-renamer.py
mp3-file-renamer.py
mp3-file-renamer.py
Python
0.000001
@@ -0,0 +1,1667 @@ +#!/usr/bin/python%0A#Python script to rename mp3 files according to the format%0A#%22Track-number Track-name.mp3%22, for example: 02 Self Control.mp3%0A#Note: Tracks must have valid ID3 data for this to work - python-mutagen is required.%0A#By Charles Bos%0A%0Aimport os%0Aimport sys%0Afrom mutagen.id3 import ID3, ID3NoHeaderError%0A%0Adef usage() :%0A print('''Usage:%0A mp3-file-renamer.py %3Cpath to music%3E''')%0A%0A#Get music directory%0Aargs = sys.argv%0Aif (len(args) != 2) or (args%5B1%5D == '-h') or (args%5B1%5D == '--help') :%0A usage()%0A os._exit(0)%0Aelse :%0A if os.path.exists(args%5B1%5D) : musicDir = args%5B1%5D%0A else :%0A usage()%0A os._exit(0)%0A%0A#Get titles and track numbers for songs%0AmusicFiles = %5B%5D%0Atracknums = %5B%5D%0Atitles = %5B%5D%0A%0Afor root, dirs, files in os.walk(musicDir, topdown=False):%0A for name in files:%0A if name.find(%22.mp3%22) != -1 : %0A musicFiles.append(os.path.join(root, name))%0A%0Afor x in musicFiles :%0A try :%0A audio = ID3(x)%0A titles.append(str(audio%5B%22TIT2%22%5D.text%5B0%5D))%0A tracknums.append(str(audio%5B%22TRCK%22%5D.text%5B0%5D))%0A except (ID3NoHeaderError, KeyError) :%0A musicFiles.remove(x)%0A%0A#Add leading 0 if missing%0Afor x in tracknums :%0A if len(x) == 1 : tracknums%5Btracknums.index(x)%5D = %220%22 + x%0A%0Aif (len(tracknums) != len(titles)) or (len(tracknums) == len(titles) == 0) :%0A print(%22No valid music files found. Nothing to do. Exiting...%22)%0A os._exit(0)%0Aelse :%0A #Start renaming%0A def getPath(origSong) :%0A return origSong.rfind(%22/%22) + 1%0A %0A counter = 0%0A for x in musicFiles :%0A path = x%5B:getPath(x)%5D%0A os.rename(x, path + tracknums%5Bcounter%5D + %22 %22 + titles%5Bcounter%5D + %22.mp3%22)%0A counter += 1
cc76c00efa919f8532e21365606f38431093cc22
Write inversion counting algorithm
count_inversions.py
count_inversions.py
Python
0.0007
@@ -0,0 +1,2447 @@ +def count_inversions(list, inversion_count = 0):%0A %22%22%22%0A recursively counts inversions of halved lists%0A where inversions are instances where a larger el occurs before a smaller el%0A merges the halved lists and increments the inversion count at each level%0A%0A :param list list: list containing comparable elements%0A :param list list: list containing comparable elements%0A :returns: tuple w merged list and number of inversions%0A %22%22%22%0A if len(list) %3C 2:%0A return (list, inversion_count)%0A mid_point = len(list) / 2%0A # recursively count inversions in 1st half of input%0A first_half = count_inversions(list%5B0:mid_point%5D, inversion_count)%0A # recursively count inversions in 2nd half of input%0A second_half = count_inversions(list%5Bmid_point:len(list)%5D, inversion_count)%0A%0A # TODO: indexing into the returned tuple is confusing%0A # consider returning a dict instead%0A running_inversion_count = first_half%5B1%5D + second_half%5B1%5D%0A return merge_and_count_inversions(first_half%5B0%5D, second_half%5B0%5D, running_inversion_count)%0A%0Adef merge_and_count_inversions(a, b, inversion_count):%0A %22%22%22%0A steps through indexes in both input lists, appending the smaller val to the merged list at each step%0A increments the inversion count when els from list b are appended to the output before a is exhausted%0A%0A :param list a: ordered list%0A :param list b: ordered list%0A :returns: tuple w merged list and number of inversions%0A %22%22%22%0A i = 0%0A j = 0%0A total_len = len(a) + len(b)%0A merged = %5B%5D%0A for k in range(total_len):%0A try:%0A a%5Bi%5D%0A except IndexError:%0A # concat merged w remainder of b if a's finished%0A merged = merged + b%5Bj:len(b)%5D%0A j += 1%0A return (merged, inversion_count)%0A%0A try:%0A b%5Bj%5D%0A except IndexError:%0A # concat merged w remainder of a if b's finished%0A merged = merged + a%5Bi:len(a)%5D%0A i += 1%0A return (merged, inversion_count)%0A%0A if a%5Bi%5D %3C b%5Bj%5D:%0A merged.append(a%5Bi%5D)%0A i += 1%0A else:%0A merged.append(b%5Bj%5D)%0A j += 1%0A # increment inversion_count by num els remaining in a if a isn't exhausted%0A try:%0A a%5Bi%5D%0A # inversion_count = len(a) - i%0A remaining_in_a = len(a) - i%0A inversion_count = inversion_count + remaining_in_a%0A except IndexError:%0A pass # a is exhausted%0A%0A return (merged, inversion_count)%0A%0Alist = %5B 1, 2, 9, -1, 0%5D%0Aprint count_inversions(list)%5B1%5D%0A%0A# a = %5B1, 3, 5, 6%5D%0A# b = %5B2, 4, 7, 8, 9%5D%0A# print merge_and_count_inversions(a, b)
3331a9a6b8ada075aaefef021a8ad24a49995931
Add test for prepare_instance_slug #92
derrida/books/tests/test_search_indexes.py
derrida/books/tests/test_search_indexes.py
Python
0
@@ -0,0 +1,1411 @@ +from unittest.mock import patch%0Afrom django.test import TestCase%0Afrom derrida.books.models import Reference, Instance%0Afrom derrida.books.search_indexes import ReferenceIndex%0A%0A%0Aclass TestReferenceIndex(TestCase):%0A fixtures = %5B'test_references.json'%5D%0A%0A def setUp(self):%0A '''None of the Instacefixtures have slugs, so generate them'''%0A for instance in Instance.objects.all():%0A instance.slug = instance.generate_safe_slug()%0A instance.save()%0A%0A def test_prepare_instance_slug(self):%0A%0A # create a ReferenceIndex object%0A refindex = ReferenceIndex()%0A # get a reference%0A reference = Reference.objects.first()%0A # not a book section (none in test set are)%0A # should return the slug of its instance%0A slug = refindex.prepare_instance_slug(reference)%0A assert slug == reference.instance.slug%0A%0A # create a work as a 'collected in'%0A ecrit = Instance.objects.get(slug__icontains='lecriture-et-la')%0A debat = Instance.objects.get(slug__icontains='le-debat-sur')%0A # make ecrit a 'section' of debat%0A ecrit.collected_in = debat%0A ecrit.save()%0A # get a reference from ecrit%0A reference = Reference.objects.filter(instance=ecrit).first()%0A # should return the slug for debat not ecrit%0A slug = refindex.prepare_instance_slug(reference)%0A assert slug == debat.slug%0A
451799f126afcdda70138dc348b9e1f276b1f86f
Add setting file for later use.
ox_herd/settings.py
ox_herd/settings.py
Python
0
@@ -0,0 +1,64 @@ +%22%22%22Module to represent basic settings for ox_herd package.%0A%22%22%22%0A%0A
ec5136b86cce92a49cf2eea852f1d8f2d7110cf0
Create element_search.py
09-revisao/practice_python/element_search.py
09-revisao/practice_python/element_search.py
Python
0.000002
@@ -0,0 +1,1102 @@ +#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%22%22%22Exercise 20: Element Search%0A%0AWrite a function that takes an ordered list of numbers (a list where the%0Aelements are in order from smallest to largest) and another number. The%0Afunction decides whether or not the given number is inside the list and%0Areturns (then prints) an appropriate boolean.%0A%0AExtras:%0A%0A Use binary search.%0A%22%22%22%0A%0A%0Adef in_list(a_list, number):%0A return True if %5BTrue for i in a_list if i == number%5D else False%0A%0A%0Adef in_list2(a_list, number):%0A if len(a_list) == 1:%0A return a_list%5B0%5D == number%0A elif a_list%5Blen(a_list) // 2%5D %3E number:%0A return in_list2(a_list%5B:len(a_list) // 2%5D, number)%0A else:%0A return in_list2(a_list%5Blen(a_list) // 2:%5D, number)%0A%0A%0Aif __name__ == %22__main__%22:%0A%0A a_list = %5B1, 3, 4, 5, 6, 7, 8, 12, 15, 20, 23, 33, 45, 64%5D%0A number = int(input(%22Enter a number: %22))%0A%0A print(%0A %22The number %25i is in the list %25s: %25s%22 %25%0A (number, a_list, in_list(a_list, number)))%0A%0A print(%0A %22The number %25i is in the list %25s: %25s%22 %25%0A (number, a_list, in_list2(a_list, number)))%0A
7060b82030d719cdcbdcecdb5eb7d34b405aa805
Make the migration for previous commit
platforms/migrations/0003_auto_20150718_0050.py
platforms/migrations/0003_auto_20150718_0050.py
Python
0.000013
@@ -0,0 +1,447 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0Aimport jsonfield.fields%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('platforms', '0002_auto_20150718_0042'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='platform',%0A name='default_installer',%0A field=jsonfield.fields.JSONField(null=True),%0A ),%0A %5D%0A
9f4861c4cfc919cffc234cc40044d3bc84e8b086
Implement statsd prefix sanitization
gunicorn/instrument/statsd.py
gunicorn/instrument/statsd.py
# -*- coding: utf-8 -
#
# This file is part of gunicorn released under the MIT license.
# See the NOTICE for more information.

"Bare-bones implementation of statsD's protocol, client-side"

import re
import socket
import logging
from gunicorn.glogging import Logger

# Instrumentation constants
STATSD_DEFAULT_PORT = 8125
METRIC_VAR = "metric"
VALUE_VAR = "value"
MTYPE_VAR = "mtype"
GAUGE_TYPE = "gauge"
COUNTER_TYPE = "counter"
HISTOGRAM_TYPE = "histogram"

class Statsd(Logger):
    """statsD-based instrumentation, that passes as a logger
    """
    def __init__(self, cfg):
        """host, port: statsD server
        """
        Logger.__init__(self, cfg)

        prefix_regex = re.compile(r"\.*$")
        self.prefix = prefix_regex.sub(".", cfg.statsd_prefix)

        try:
            host, port = cfg.statsd_host
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            self.sock.connect((host, int(port)))
        except Exception:
            self.sock = None

    # Log errors and warnings
    def critical(self, msg, *args, **kwargs):
        Logger.critical(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.critical", 1)

    def error(self, msg, *args, **kwargs):
        Logger.error(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.error", 1)

    def warning(self, msg, *args, **kwargs):
        Logger.warning(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.warning", 1)

    def exception(self, msg, *args, **kwargs):
        Logger.exception(self, msg, *args, **kwargs)
        self.increment("gunicorn.log.exception", 1)

    # Special treatement for info, the most common log level
    def info(self, msg, *args, **kwargs):
        self.log(logging.INFO, msg, *args, **kwargs)

    # skip the run-of-the-mill logs
    def debug(self, msg, *args, **kwargs):
        self.log(logging.DEBUG, msg, *args, **kwargs)

    def log(self, lvl, msg, *args, **kwargs):
        """Log a given statistic if metric, value and type are present
        """
        try:
            extra = kwargs.get("extra", None)
            if extra is not None:
                metric = extra.get(METRIC_VAR, None)
                value = extra.get(VALUE_VAR, None)
                typ = extra.get(MTYPE_VAR, None)
                if metric and value and typ:
                    if typ == GAUGE_TYPE:
                        self.gauge(metric, value)
                    elif typ == COUNTER_TYPE:
                        self.increment(metric, value)
                    elif typ == HISTOGRAM_TYPE:
                        self.histogram(metric, value)
                    else:
                        pass

            # Log to parent logger only if there is something to say
            if msg is not None and len(msg) > 0:
                Logger.log(self, lvl, msg, *args, **kwargs)
        except Exception:
            pass

    # access logging
    def access(self, resp, req, environ, request_time):
        """Measure request duration
        request_time is a datetime.timedelta
        """
        Logger.access(self, resp, req, environ, request_time)
        duration_in_ms = request_time.seconds * 1000 + float(request_time.microseconds) / 10 ** 3
        self.histogram("gunicorn.request.duration", duration_in_ms)
        self.increment("gunicorn.requests", 1)
        self.increment("gunicorn.request.status.%d" % int(resp.status.split()[0]), 1)

    # statsD methods
    # you can use those directly if you want
    def gauge(self, name, value):
        try:
            if self.sock:
                self.sock.send("{0}{1}:{2}|g".format(self.prefix, name, value))
        except Exception:
            pass

    def increment(self, name, value, sampling_rate=1.0):
        try:
            if self.sock:
                self.sock.send("{0}{1}:{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))
        except Exception:
            pass

    def decrement(self, name, value, sampling_rate=1.0):
        try:
            if self.sock:
                self.sock.send("{0){1}:-{2}|c|@{3}".format(self.prefix, name, value, sampling_rate))
        except Exception:
            pass

    def histogram(self, name, value):
        try:
            if self.sock:
                self.sock.send("{0}{1}:{2}|ms".format(self.prefix, name, value))
        except Exception:
            pass
Python
0.998375
@@ -670,83 +670,54 @@ +self. prefix -_regex = re.compile(r%22%5C.*$%22)%0A self.prefix = prefix_regex.sub(%22 + = re.sub(r%22%5E(.+%5B%5E.%5D+)%5C.*$%22, %22%5Cg%3C1%3E .%22,
582c911c5a18659ca4c95bf434294cfeedf1843c
Add line parser for evidence import
src/ggrc/converters/handlers/document.py
src/ggrc/converters/handlers/document.py
# Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>

"""Handlers document entries."""

from flask import current_app

from ggrc import models
from ggrc.login import get_current_user_id
from ggrc.converters.handlers import handlers


class RequestLinkHandler(handlers.ColumnHandler):
  """Base class for request documents handlers."""

  @staticmethod
  def _parse_line(line):
    """Parse a single line and return link and title.

    Args:
      line: string containing a single line from a cell.

    Returns:
      tuple containing a link and a title.
    """
    line = line.strip()
    return line, line

  def parse_item(self, has_title=False):
    """Parse document link lines.

    Returns:
      list of documents for all URLs and evidences.
    """
    documents = []
    user_id = get_current_user_id()
    for line in self.raw_value.splitlines():
      link, title = self._parse_line(line)
      documents.append(models.Document(
          link=link,
          title=title,
          modified_by_id=user_id,
          context=self.row_converter.obj.context,
      ))
    return documents

  @staticmethod
  def _get_link_str(documents):
    """Generate a new line separated string for all document links.

    Returns:
      string containing all URLs and titles.
    """
    return "\n".join(
        "{} {}".format(document.link, document.title)
        for document in documents
    )

  def set_obj_attr(self):
    self.value = self.parse_item()


class RequestEvidenceHandler(RequestLinkHandler):
  """Handler for evidence field on request imports."""

  def get_value(self):
    return self._get_link_str(self.row_converter.obj.documents)

  def insert_object(self):
    """Update request evidence values

    This function adds missing evidence and remove existing ones from
    requests. The existing evidence with new titles just change the title.
    """
    if not self.value or self.row_converter.ignore:
      return

    new_link_map = {doc.link: doc for doc in self.value}
    old_link_map = {doc.link: doc for doc in self.row_converter.obj.documents}

    for new_link, new_doc in new_link_map.items():
      if new_link in old_link_map:
        old_link_map[new_link].title = new_doc.title
      else:
        self.row_converter.obj.documents.append(new_doc)

    for old_link, old_doc in old_link_map.items():
      if old_link not in new_link_map:
        self.row_converter.obj.documents.remove(old_doc)

  def set_value(self):
    """This should be ignored with second class attributes."""


class RequestUrlHandler(RequestLinkHandler):
  """Handler for URL field on request imports."""

  def get_value(self):
    documents = [doc for doc in self.row_converter.obj.related_objects()
                 if isinstance(doc, models.Document)]
    return self._get_link_str(documents)

  def insert_object(self):
    """Update request URL values

    This function adds missing URLs and remove existing ones from requests.
    The existing URLs with new titles just change the title.
    """
    if not self.value or self.row_converter.ignore:
      return

    new_link_map = {doc.link: doc for doc in self.value}
    old_link_map = {doc.link: doc
                    for doc in self.row_converter.obj.related_objects()
                    if isinstance(doc, models.Document)}

    for new_link, new_doc in new_link_map.items():
      if new_link in old_link_map:
        old_link_map[new_link].title = new_doc.title
      else:
        models.Relationship(
            source=self.row_converter.obj,
            destination=new_doc,
        )

    for old_link, old_doc in old_link_map.items():
      if old_link not in new_link_map:
        if old_doc in self.row_converter.obj.related_destinations:
          self.row_converter.obj.related_destinations.remove(old_doc)
        elif old_doc in self.row_converter.obj.related_sources:
          self.row_converter.obj.related_sources.remove(old_doc)
        else:
          current_app.logger.warning("Invalid relationship state for request "
                                     "URLs.")

  def set_value(self):
    """This should be ignored with second class attributes."""
Python
0.000001
@@ -1616,32 +1616,392 @@ st imports.%22%22%22%0A%0A + @staticmethod%0A def _parse_line(line):%0A %22%22%22Parse a single line and return link and title.%0A%0A Args:%0A line: string containing a single line from a cell.%0A%0A Returns:%0A tuple containing a link and a title.%0A %22%22%22%0A parts = line.strip().split()%0A if len(parts) == 1:%0A return parts%5B0%5D, parts%5B0%5D%0A%0A return parts%5B0%5D, %22 %22.join(parts%5B1:%5D)%0A%0A def get_value(
3b42e348987294602440c3c1d4aa4361afcdc298
Add problem 14
problem_14.py
problem_14.py
Python
0
@@ -0,0 +1,2558 @@ +from problem_12 import new_encryption_oracle, find_blocksize%0Aimport random%0Afrom string import printable%0A%0ARANDOM_PREFIX = ''.join(random.choice(printable) for _ in range(random.randrange(0, 20)))%0A# print len(RANDOM_PREFIX)%0A%0A%0Adef oracle(adversary_input):%0A return new_encryption_oracle(RANDOM_PREFIX + adversary_input)%0A%0A%0Adef find_oracle_added_length(blocksize):%0A adversary_input = ''%0A previous_length = len(oracle(adversary_input))%0A while True:%0A adversary_input += '0'%0A current_length = len(oracle(adversary_input))%0A if current_length %3E previous_length:%0A return current_length - len(adversary_input) - blocksize%0A%0A%0Adef find_padding_length(blocksize):%0A adversary_input = '0'*64%0A zero_encrypted_block = oracle(adversary_input)%5B2*blocksize:3*blocksize%5D%0A change_counter = 1%0A while True:%0A adversary_input = change_counter*'1' + '0'*(64-change_counter)%0A current_second_block = oracle(adversary_input)%5B2*blocksize:3*blocksize%5D%0A if current_second_block != zero_encrypted_block:%0A return 2*blocksize - change_counter + 1%0A change_counter += 1%0A%0A%0Adef find_single_ecb_character(blocksize, decrypted, start_padding_length, unknown_text_length):%0A bypass_start_padding = '0'*(2*blocksize - start_padding_length)%0A input_padding = bypass_start_padding + '0'*(blocksize*(unknown_text_length/blocksize + 1) - len(decrypted) - 1)%0A test_padding = input_padding + decrypted%0A block_position = (len(test_padding) - len(bypass_start_padding))/blocksize%0A%0A ciphertext = oracle(input_padding)%5B2*blocksize:%5D%0A cipher_blocks = %5Bciphertext%5Bi*blocksize:(i+1)*blocksize%5D for i in range(len(ciphertext)/blocksize)%5D%0A%0A for test_char in printable:%0A test_character = test_padding + test_char%0A%0A test_character_ciphertext = oracle(test_character)%5B2*blocksize:%5D%0A test_blocks = %5Btest_character_ciphertext%5Bi*blocksize:(i+1)*blocksize%5D for i in range(len(test_character_ciphertext)/blocksize)%5D%0A%0A if test_blocks%5Bblock_position%5D == cipher_blocks%5Bblock_position%5D:%0A return test_char%0A%0A%0Aif __name__ == '__main__':%0A blocksize = find_blocksize(oracle)%0A%0A oracle_added_length = find_oracle_added_length(blocksize)%0A start_padding_length = find_padding_length(blocksize)%0A unknown_text_length = oracle_added_length - start_padding_length%0A%0A decrypted = ''%0A while len(decrypted) %3C unknown_text_length:%0A decrypted += find_single_ecb_character(blocksize, decrypted, start_padding_length, unknown_text_length)%0A print decrypted.decode('base64')%0A
49b1de4a68133e618723f96f2dc922b311bdd982
Add Script to encode raw RGB565
util/encode_raw.py
util/encode_raw.py
Python
0
@@ -0,0 +1,603 @@ +#!/usr/bin/env python%0A# Converts raw RGB565 video to MP4/AVI%0A%0Afrom sys import argv, exit%0Afrom array import array%0Afrom subprocess import call%0A%0Abuf=None%0ATMP_FILE = %22/tmp/video.raw%22%0A%0Aif (len(argv) != 4):%0A print(%22Usage: encode_raw input.raw output.avi fps%22)%0A exit(1)%0A%0Awith open(argv%5B1%5D, %22rb%22) as f:%0A buf = array(%22H%22, f.read())%0A%0A#Swap not needed if rgb565be is supported%0Abuf.byteswap()%0Awith open(TMP_FILE, %22wb%22) as f:%0A f.write(buf.tostring())%0A%0Acmd = %22ffmpeg -vcodec rawvideo -r %25d -f rawvideo -pix_fmt rgb565 -s 160x120 -i %25s -vcodec mpeg4 %25s%22%25(int(argv%5B3%5D), TMP_FILE, argv%5B2%5D)%0Acall(cmd.split())%0A
74354263acb3399295e7fde18d6aeed4b7bb7397
Fix maybe all flake8 errors. Add first test.
what_transcode/tests.py
what_transcode/tests.py
Python
0
@@ -0,0 +1,3233 @@ +%22%22%22%0AThis file demonstrates writing tests using the unittest module. These will pass%0Awhen you run %22manage.py test%22.%0A%0AReplace this with more appropriate tests for your application.%0A%22%22%22%0A%0Afrom django.test import TestCase%0A%0Afrom what_transcode.utils import get_mp3_ids%0A%0A%0Aclass UtilsTests(TestCase):%0A def test_get_mp3_ids(self):%0A what_group = %7B%0A 'torrents': %5B%0A %7B%0A 'id': 0,%0A 'format': 'FLAC',%0A 'encoding': 'Lossless',%0A 'media': 'CD',%0A 'remastered': False,%0A 'remasterCatalogueNumber': None,%0A 'remasterRecordLabel': None,%0A 'remasterTitle': None,%0A 'remasterYear': None,%0A %7D,%0A %7B%0A 'id': 1,%0A 'format': 'MP3',%0A 'encoding': '320',%0A 'media': 'CD',%0A 'remastered': False,%0A 'remasterCatalogueNumber': None,%0A 'remasterRecordLabel': None,%0A 'remasterTitle': None,%0A 'remasterYear': None,%0A %7D,%0A %7B%0A 'id': 2,%0A 'format': 'FLAC',%0A 'encoding': 'Lossless',%0A 'media': 'CD',%0A 'remastered': True,%0A 'remasterCatalogueNumber': 'catno',%0A 'remasterRecordLabel': None,%0A 'remasterTitle': None,%0A 'remasterYear': None,%0A %7D,%0A %7B%0A 'id': 3,%0A 'format': 'FLAC',%0A 'encoding': 'Lossless',%0A 'media': 'WEB',%0A 'remastered': False,%0A 'remasterCatalogueNumber': None,%0A 'remasterRecordLabel': None,%0A 'remasterTitle': None,%0A 'remasterYear': None,%0A %7D,%0A %7B%0A 'id': 4,%0A 'format': 'MP3',%0A 'encoding': 'V0 (VBR)',%0A 'media': 'WEB',%0A 'remastered': False,%0A 'remasterCatalogueNumber': None,%0A 'remasterRecordLabel': None,%0A 'remasterTitle': None,%0A 'remasterYear': None,%0A %7D,%0A %7B%0A 'id': 5,%0A 'format': 'MP3',%0A 'encoding': 'V2 (VBR)',%0A 'media': 'WEB',%0A 'remastered': False,%0A 'remasterCatalogueNumber': None,%0A 'remasterRecordLabel': None,%0A 'remasterTitle': None,%0A 'remasterYear': None,%0A %7D,%0A %5D%0A %7D%0A self.assertEqual(get_mp3_ids(what_group, %7B%0A 'torrent': what_group%5B'torrents'%5D%5B0%5D%0A %7D), %7B'320': 1%7D)%0A self.assertEqual(get_mp3_ids(what_group, %7B%0A 'torrent': what_group%5B'torrents'%5D%5B2%5D%0A %7D), %7B%7D)%0A self.assertEqual(get_mp3_ids(what_group, %7B%0A 'torrent': what_group%5B'torrents'%5D%5B3%5D%0A %7D), %7B'V0': 4, 'V2': 5%7D)%0A
5dba86b3a68c27a01eb143a6dfdb35d01c3c99e8
add app_test
turbo/test/app_test.py
turbo/test/app_test.py
Python
0.000003
@@ -0,0 +1,1086 @@ +from __future__ import absolute_import, division, print_function, with_statement%0A%0Aimport os%0Aimport signal%0Aimport sys%0Aimport unittest%0Aimport random%0Aimport time%0Aimport threading%0Aimport logging%0Aimport requests%0Aimport multiprocessing%0A%0Afrom turbo import app%0Afrom turbo.conf import app_config%0Afrom turbo import register%0A%0Aapp_config.app_name = 'app_test'%0Aapp_config.web_application_setting = %7B%7D%0A%0Alogger = logging.getLogger()%0A%0Aprint(logger.level)%0A%0A%0Aclass HomeHandler(app.BaseBaseHandler):%0A%0A def get(self):%0A logger.info('get')%0A%0A%0Adef run_server():%0A register.register_url('/', HomeHandler)%0A app.start()%0A%0A%0Aclass AppTest(unittest.TestCase):%0A%0A def setUp(self):%0A server = multiprocessing.Process(target=run_server)%0A server.start()%0A self.localhost = 'http://localhost:8888'%0A self.pid = server.pid%0A logger.warning(self.pid)%0A%0A def tearDown(self):%0A os.kill(self.pid, signal.SIGKILL)%0A%0A def test_get(self):%0A resp = requests.get(self.localhost)%0A logger.warning(resp.status_code)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()
be0331e64726d659b824187fbc91b54ce0405615
add initial implementation of weighted EM PCA
wpca/test/test_empca.py
wpca/test/test_empca.py
Python
0
@@ -0,0 +1,1313 @@ +import numpy as np%0Afrom numpy.testing import assert_allclose%0A%0Afrom ..empca import orthonormalize, random_orthonormal, pca, empca%0A%0Adef norm_sign(X):%0A i_max_abs = np.argmax(abs(X), 0)%0A sgn = np.sign(X%5Bi_max_abs, range(X.shape%5B1%5D)%5D)%0A return X * sgn%0A%0A%0Adef assert_columns_allclose_upto_sign(A, B, *args, **kwargs):%0A assert_allclose(norm_sign(A), norm_sign(B), *args, **kwargs)%0A%0A%0Adef test_orthonormalize():%0A rand = np.random.RandomState(42)%0A X = rand.randn(3, 4)%0A X2 = orthonormalize(X)%0A assert_allclose(X%5B0%5D / np.linalg.norm(X%5B0%5D), X2%5B0%5D)%0A assert_allclose(np.dot(X2, X2.T), np.eye(X2.shape%5B0%5D), atol=1E-15)%0A%0A%0Adef test_random_orthonormal():%0A def check_random_orthonormal(n_samples, n_features):%0A X = random_orthonormal(n_samples, n_features, 42)%0A assert X.shape == (n_samples, n_features)%0A assert_allclose(np.dot(X, X.T), np.eye(X.shape%5B0%5D), atol=1E-15)%0A for n_samples in range(1, 6):%0A yield check_random_orthonormal, n_samples, 5%0A%0A%0Adef test_empca_vs_pca():%0A rand = np.random.RandomState(42)%0A X = rand.randn(50, 5)%0A W = np.ones_like(X)%0A evecs1, coeff1 = empca(X, W, 5, niter=100)%0A evecs2, coeff2 = pca(X, 5)%0A%0A assert_columns_allclose_upto_sign(evecs1.T, evecs2.T, rtol=1E-6)%0A assert_columns_allclose_upto_sign(coeff1, coeff2, rtol=1E-6)%0A
7ccfc89a51a76764c36b009dd9b5fc55570e3f56
Add forgot password test
api/radar_api/tests/test_forgot_password.py
api/radar_api/tests/test_forgot_password.py
Python
0.000001
@@ -0,0 +1,1710 @@ +import json%0A%0Afrom radar_api.tests.fixtures import get_user%0Afrom radar.database import db%0A%0A%0Adef test_forgot_password(app):%0A user = get_user('admin')%0A%0A client = app.test_client()%0A%0A assert user.reset_password_token is None%0A assert user.reset_password_date is None%0A%0A response = client.post('/forgot-password', data=%7B%0A 'username': user.username,%0A 'email': user.email%0A %7D)%0A%0A assert response.status_code == 200%0A%0A db.session.refresh(user)%0A%0A assert user.reset_password_token is not None%0A assert user.reset_password_date is not None%0A%0A%0Adef test_missing_username(app):%0A user = get_user('admin')%0A%0A client = app.test_client()%0A%0A response = client.post('/forgot-password', data=%7B%0A 'email': user.email,%0A %7D)%0A%0A assert response.status_code == 422%0A%0A data = json.loads(response.data)%0A%0A assert data == %7B%0A 'errors': %7B%0A 'username': %5B'This field is required.'%5D%0A %7D%0A %7D%0A%0A%0Adef test_missing_email(app):%0A user = get_user('admin')%0A%0A client = app.test_client()%0A%0A response = client.post('/forgot-password', data=%7B%0A 'username': user.username,%0A %7D)%0A%0A assert response.status_code == 422%0A%0A data = json.loads(response.data)%0A%0A assert data == %7B%0A 'errors': %7B%0A 'email': %5B'This field is required.'%5D%0A %7D%0A %7D%0A%0A%0Adef test_user_not_found(app):%0A client = app.test_client()%0A%0A response = client.post('/forgot-password', data=%7B%0A 'username': '404',%0A 'email': '[email protected]',%0A %7D)%0A%0A assert response.status_code == 422%0A%0A data = json.loads(response.data)%0A%0A assert data == %7B%0A 'errors': %7B%0A 'username': %5B'No user found with that username and email.'%5D%0A %7D%0A %7D%0A
b4bf757a15c404080679335bcce04ba45a7e4eae
Update fix_nonwarehouse_ledger_gl_entries_for_transactions.py
erpnext/patches/v7_0/fix_nonwarehouse_ledger_gl_entries_for_transactions.py
erpnext/patches/v7_0/fix_nonwarehouse_ledger_gl_entries_for_transactions.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe

def execute():
    if not frappe.db.get_single_value("Accounts Settings", "auto_accounting_for_stock"):
        return

    frappe.reload_doctype("Account")
    warehouses = frappe.db.sql_list("""select name from tabAccount
        where account_type = 'Stock' and is_group = 0
        and (warehouse is null or warehouse = '')""")
    if warehouses:
        warehouses = set_warehouse_for_stock_account(warehouses)

    stock_vouchers = frappe.db.sql("""select distinct sle.voucher_type, sle.voucher_no
        from `tabStock Ledger Entry` sle
        where sle.warehouse in (%s) and creation > '2016-05-01'
        and not exists(select name from `tabGL Entry`
            where account=sle.warehosue and voucher_type=sle.voucher_type and voucher_no=sle.voucher_no)
        order by sle.posting_date""" % ', '.join(['%s']*len(warehouses)), tuple(warehouses))

    rejected = []
    for voucher_type, voucher_no in stock_vouchers:
        try:
            frappe.db.sql("""delete from `tabGL Entry`
                where voucher_type=%s and voucher_no=%s""", (voucher_type, voucher_no))

            voucher = frappe.get_doc(voucher_type, voucher_no)
            voucher.make_gl_entries()
            frappe.db.commit()
        except Exception, e:
            print frappe.get_traceback()
            rejected.append([voucher_type, voucher_no])
            frappe.db.rollback()

    print rejected

def set_warehouse_for_stock_account(warehouse_account):
    for account in warehouse_account:
        if frappe.db.exists('Warehouse', account):
            frappe.db.set_value("Account", account, "warehouse", account)
        else:
            warehouse_account.remove(account)

    return warehouse_account
Python
0.000001
@@ -821,10 +821,10 @@ reho -s u +s e an @@ -1690,24 +1690,25 @@ return warehouse_account +%0A
ae52e3e4dc1fc254b7e1c258caa1fe00317bb9a5
Add migrate script.
disqus_converter.py
disqus_converter.py
Python
0
@@ -0,0 +1,2462 @@ +'''Convert disquls XML comments to YAML.'''%0Aimport os%0Aimport copy%0Aimport pathlib%0Aimport hashlib%0Aimport yaml%0Aimport iso8601%0Aimport xmltodict%0Afrom postsinfo import mapping%0Afrom rebuild_comments import encrypt%0A%0A%0ACOMMENT_DIR = os.environ.get('COMMENT_DIR', './_data/comments')%0A%0A%0Adef get_disqus_threads(infile):%0A with open(infile, 'r', encoding='utf-8') as file:%0A disqus = xmltodict.parse(file.read())%5B'disqus'%5D%0A%0A threads = %7B%7D%0A for trd in disqus%5B'thread'%5D:%0A if not is_local_thread(trd):%0A threads%5Btrd%5B'@dsq:id'%5D%5D = trd%0A threads%5Btrd%5B'@dsq:id'%5D%5D%5B'posts'%5D = %5B%5D%0A%0A for pst in disqus%5B'post'%5D:%0A key = pst%5B'thread'%5D%5B'@dsq:id'%5D%0A if key in threads:%0A threads%5Bkey%5D%5B'posts'%5D.append(pst)%0A%0A return threads%0A%0A%0Adef is_local_thread(thread):%0A return '0.0.0.0' in thread%5B'link'%5D or '://localhost' in thread%5B'link'%5D%0A%0A%0Adef write(thread, post_info):%0A uid = post_info%5B'page_id'%5D%5B1:%5D%0A comments = transform(thread, post_info)%0A if comments:%0A with open(os.path.join(COMMENT_DIR, f'%7Buid%7D.yml'), 'a+', encoding='utf8') as file:%0A yaml.dump(comments,%0A file,%0A default_flow_style=False,%0A allow_unicode=True)%0A%0A%0Adef transform(thread, post_info):%0A '''Convert disqus form data to a normal comment.'''%0A comments = %5B%5D%0A for post in thread%5B'posts'%5D:%0A comment = copy.copy(post_info)%0A comment.update(%0A %7B'date': iso8601.parse_date(post%5B'createdAt'%5D),%0A 'name': post%5B'author'%5D%5B'name'%5D,%0A 'email': hashlib.md5(post%5B'author'%5D%5B'email'%5D.encode('ascii')).hexdigest(),%0A 'bucket': encrypt(post%5B'author'%5D%5B'email'%5D),%0A 'website': make_profile_url(post),%0A 'message': post%5B'message'%5D%7D)%0A comments.append(comment)%0A return comments%0A%0A%0Adef make_profile_url(post):%0A return 'https://disqus.com/by/%7B%7D/'.format(post%5B'author'%5D%5B'username'%5D) if post%5B'author'%5D%5B'isAnonymous'%5D == 'false' else ''%0A%0A%0Adef main():%0A # Load disqus%0A disqus_threads = get_disqus_threads(infile='db.xml')%0A%0A # Make sure the comment directory exists%0A pathlib.Path(COMMENT_DIR).mkdir(parents=True, exist_ok=True)%0A%0A # Convert disqus to current comment format. Use posts mapping.%0A for trd in disqus_threads.values():%0A # Update comment files with converted disqus comments%0A if trd%5B'link'%5D in mapping:%0A write(trd, mapping%5Btrd%5B'link'%5D%5D)%0A%0A%0Aif __name__ == '__main__':%0A main()
c8ae682ff98f2c5b5733ae4b299970c820e46630
Add regression test for #636
spacy/tests/regression/test_issue636.py
spacy/tests/regression/test_issue636.py
Python
0.000001
@@ -0,0 +1,506 @@ +# coding: utf8%0Afrom __future__ import unicode_literals%0A%0Afrom ...tokens.doc import Doc%0Aimport pytest%0A%0A%[email protected]%[email protected]%[email protected]('text', %5B%22I cant do this.%22%5D)%0Adef test_issue636(EN, text):%0A %22%22%22Test that to_bytes and from_bytes don't change the token lemma.%22%22%22%0A doc1 = EN(text)%0A doc2 = Doc(EN.vocab)%0A doc2.from_bytes(doc1.to_bytes())%0A print(%5Bt.lemma_ for t in doc1%5D, %5Bt.lemma_ for t in doc2%5D)%0A assert %5Bt.lemma_ for t in doc1%5D == %5Bt.lemma_ for t in doc2%5D%0A
423707ea25e88b2454a9541eb52f900da87e95b2
allow external backends, specified via ZMQ_BACKEND env
zmq/backend/__init__.py
zmq/backend/__init__.py
"""Import basic exposure of libzmq C API as a backend""" #----------------------------------------------------------------------------- # Copyright (C) 2013 Brian Granger, Min Ragan-Kelley # # This file is part of pyzmq # # Distributed under the terms of the New BSD License. The full license is in # the file COPYING.BSD, distributed as part of this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- from .select import public_api, select_backend try: _ns = select_backend('zmq.backend.cython') except ImportError: _ns = select_backend('zmq.backend.cffi') globals().update(_ns) __all__ = public_api
Python
0
@@ -609,16 +609,26 @@ ------%0A%0A +import os%0A from .se @@ -671,14 +671,262 @@ nd%0A%0A -%0A +if 'PYZMQ_BACKEND' in os.environ:%0A backend = os.environ%5B'PYZMQ_BACKEND'%5D%0A if backend in ('cython', 'cffi'):%0A backend = 'zmq.backend.%25s' %25 backend%0A _ns = select_backend(backend)%0Aelse:%0A # default to cython, fallback to cffi%0A try:%0A + @@ -968,16 +968,20 @@ ython')%0A + except I @@ -992,16 +992,20 @@ tError:%0A + _ns
cb9166c4564c4e763e1214355dc76cbe6d466258
Add data migration for section
books/migrations/0009_auto_20141127_1718.py
books/migrations/0009_auto_20141127_1718.py
Python
0
@@ -0,0 +1,1844 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Adef add_sections(apps, schema_editor):%0A # Don't just use books.models.Section, that could be out of date%0A Section = apps.get_model('books', 'Section')%0A%0A FRONT_MATTER_CHOICES = %5B%0A #('db_value', 'human readable'),%0A ('half_title', 'Half title'),%0A ('title_page', 'Title Page'),%0A ('colophon', 'Colophon'),%0A ('contents', 'Contents'),%0A ('foreward', 'Foreward'),%0A ('preface', 'Preface'),%0A ('acknowledgment', 'Acknowlegment'),%0A ('introduction', 'Introduction'),%0A ('dedication', 'Dedication'),%0A ('epigraph', 'Epigraph'),%0A ('prologue', 'Prologue'),%0A %5D%0A%0A BACK_MATTER_CHOICES = %5B%0A ('epilogue', 'Epilogue'),%0A ('afterward', 'Afterward'),%0A ('conclusion', 'Conclusion'),%0A ('postscript', 'Postscript'),%0A ('appendix', 'Appendix'),%0A ('glossary', 'Glossary'),%0A ('bibliography', 'Bibliography'),%0A ('index', 'Index'),%0A ('colophon', 'Colophon'),%0A %5D%0A%0A for order, (sect_name, _) in enumerate(FRONT_MATTER_CHOICES):%0A sect = Section(name=sect_name, order=order, location='front')%0A sect.save()%0A for order, (sect_name, _) in enumerate(BACK_MATTER_CHOICES):%0A sect = Section(name=sect_name, order=order, location='back')%0A sect.save()%0A%0Adef remove_sections(apps, schema_editor):%0A %22%22%22 Just make the migration reversible, by calling this function. %22%22%22%0A Section = apps.get_model('books', 'Section')%0A for section in Section.objects.all():%0A section.delete()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('books', '0008_auto_20141127_1657'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(add_sections, remove_sections),%0A %5D%0A%0A
161feec0d3764c7cdeebfdc7cd62e5901a89666a
Add initial implementation
runtracker.py
runtracker.py
Python
0.000001
@@ -0,0 +1,2205 @@ +import cv2%0Aimport numpy as np%0Aimport imutils%0A%0A%0API = 3.141592654%0A%0AAREA_ERROR_THRESH = 0.05 # Error away from the mean area%0A%0A%0A# Color ranges%0A#CALIB_COLOR_MIN = ( 70, 40, 61)%0A#CALIB_COLOR_MAX = (110, 175, 255)%0ACALIB_COLOR_MIN = ( 52, 24, 56)%0ACALIB_COLOR_MAX = ( 98, 169, 178)%0ATRACK_COLOR_MIN = ( 0, 0, 0)%0ATRACK_COLOR_MAX = (255, 225, 255)%0A%0A%0AprevCalib = %5B%5D%0AprevTrack = None%0A%0A%0Adef ellipseArea(ellipse):%0A return ellipse%5B1%5D%5B0%5D * ellipse%5B1%5D%5B1%5D * PI / 4%0A%0A%0Adef main():%0A # Open webcam%0A cap = cv2.VideoCapture(0)%0A%0A while True:%0A # Get frame%0A ret, frame = cap.read()%0A output = frame.copy()%0A hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # Convert to HSV (for color range)%0A%0A # Apply morphological filtering%0A k_x = cv2.getGaussianKernel(8, 0)%0A k_y = cv2.getGaussianKernel(8, 0)%0A kernel = k_x * np.transpose(k_y)%0A%0A filt = cv2.inRange(hsv, CALIB_COLOR_MIN, CALIB_COLOR_MAX)%0A filt = cv2.morphologyEx(filt, cv2.MORPH_OPEN, kernel, iterations=2)%0A%0A # Find contours%0A _, contours, _ = cv2.findContours(filt, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)%0A cv2.drawContours(output, contours, -1, (0, 0, 255), 3)%0A%0A matches = %5B%5D # Contours that match a marker%0A%0A for c in contours:%0A e = cv2.fitEllipse(c)%0A%0A area_c = cv2.contourArea(c)%0A area_e = ellipseArea(e)%0A%0A if abs(area_c - area_e) %3C (AREA_ERROR_THRESH * (area_c + area_e) / 2): # Is within error%0A matches.append((c, e))%0A%0A # Sort by size%0A matches.sort(key=lambda x: ellipseArea(x%5B1%5D), reverse=True)%0A%0A # Get 2 best ellipses%0A for i in range(0, min(len(matches), 2)):%0A c = matches%5Bi%5D%5B0%5D%0A e = matches%5Bi%5D%5B1%5D%0A cv2.ellipse(output, e, (0, 255, 0), 2)%0A cv2.putText(output, 'C: ' + str(cv2.contourArea(c)) + ' %7C E: ' + str(ellipseArea(e)), (int(e%5B0%5D%5B0%5D), int(e%5B0%5D%5B1%5D)), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)%0A%0A # Show frame%0A cv2.imshow('Frame', frame)%0A cv2.imshow('Filtered', filt)%0A cv2.imshow('Output', output)%0A if cv2.waitKey(1) == 27:%0A break%0A%0A%0Aif __name__ == '__main__':%0A main()
07825b7f80a12619c847de49f0f2b991faeea7b4
Add a simple handler cookie_wsh.py useful for cookie test
example/cookie_wsh.py
example/cookie_wsh.py
Python
0.000004
@@ -0,0 +1,916 @@ +# Copyright 2014 Google Inc. All rights reserved.%0A#%0A# Use of this source code is governed by a BSD-style%0A# license that can be found in the COPYING file or at%0A# https://developers.google.com/open-source/licenses/bsd%0A%0A%0Aimport urlparse%0A%0A%0Adef _add_set_cookie(request, value):%0A request.extra_headers.append(('Set-Cookie', value))%0A%0A%0Adef web_socket_do_extra_handshake(request):%0A components = urlparse.urlparse(request.uri)%0A command = components%5B4%5D%0A%0A ONE_DAY_LIFE = 'Max-Age=86400'%0A%0A if command == 'set':%0A _add_set_cookie(request, '; '.join(%5B'foo=bar', ONE_DAY_LIFE%5D))%0A elif command == 'set_httponly':%0A _add_set_cookie(request,%0A '; '.join(%5B'httpOnlyFoo=bar', ONE_DAY_LIFE, 'httpOnly'%5D))%0A elif command == 'clear':%0A _add_set_cookie(request, 'foo=0; Max-Age=0')%0A _add_set_cookie(request, 'httpOnlyFoo=0; Max-Age=0')%0A%0A%0Adef web_socket_transfer_data(request):%0A pass%0A
39019e998da2c1f73f82e0eb446df78ffc95c134
Create safe_steps.py
safe_steps.py
safe_steps.py
Python
0.000459
@@ -0,0 +1,318 @@ +import mcpi.minecraft as minecraft%0Aimport mcpi.block as block%0Amc = minecraft.Minecraft.create()%0A%0Awhile True:%0A%09p = mc.player.getTilePos()%0A%09b = mc.getBlock(p.x, p.y-1, p.z)%0A%09if b == block.AIR.id or b == block.WATER_FLOWING.id or b==block.WATER_STATIONARY.id:%0A%09%09mc.setBlock(pos.x, pos.y-1, pos.z, block.WOOD_PLANKS.id)%0A%0A%0A
c78480fc1f566bb6d266705336dbe9cd90d07996
Create 476_number_complement.py
476_number_complement.py
476_number_complement.py
Python
0.998761
@@ -0,0 +1,1381 @@ +%22%22%22%0Ahttps://leetcode.com/problems/number-complement/description/%0AGiven a positive integer, output its complement number. The complement strategy is to flip the bits of its binary representation.%0A%0ANote:%0AThe given integer is guaranteed to fit within the range of a 32-bit signed integer.%0AYou could assume no leading zero bit in the integer%E2%80%99s binary representation.%0AExample 1:%0AInput: 5%0AOutput: 2%0AExplanation: The binary representation of 5 is 101 (no leading zero bits), and its complement is 010. So you need to output 2.%0AExample 2:%0AInput: 1%0AOutput: 0%0AExplanation: The binary representation of 1 is 1 (no leading zero bits), and its complement is 0. So you need to output 0.%0A%0A%22%22%22%0Aclass Solution(object):%0A def findComplement(self, num):%0A %22%22%22%0A :type num: int%0A :rtype: int%0A %22%22%22%0A # Solution: Extract binary of the number using bin() function. For every character in the binary, get complement, append.%0A c = ''%0A bin_c = ''%0A bin_num = bin(num)%5B2:%5D%0A print bin_num%0A for i in range(0,len(bin_num)):%0A print bin_num%5Bi%5D%0A if bin_num%5Bi%5D == '0':%0A # print bin_num%5Bi%5D%0A c = '1'%0A elif bin_num%5Bi%5D == '1':%0A # print bin_num%5Bi%5D%0A c = '0'%0A bin_c = bin_c+c%0A print %22bin output: %22,(bin_c)%0A return(int(bin_c,2))%0A %0A
0104600fe32b2b676974f29df37d10cc86a7441a
enable CMake build (with HTTP/3) -- take 2
build/fbcode_builder/specs/proxygen_quic.py
build/fbcode_builder/specs/proxygen_quic.py
Python
0.000001
@@ -0,0 +1,671 @@ +#!/usr/bin/env python%0A# Copyright (c) Facebook, Inc. and its affiliates.%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0Afrom __future__ import unicode_literals%0A%0Aimport specs.folly as folly%0Aimport specs.fizz as fizz%0Aimport specs.mvfst as mvfst%0Aimport specs.sodium as sodium%0Aimport specs.wangle as wangle%0Aimport specs.zstd as zstd%0A%0A%0Adef fbcode_builder_spec(builder):%0A builder.add_option(%22proxygen/proxygen:cmake_defines%22, %7B%22BUILD_QUIC%22: %22ON%22%7D)%0A return %7B%0A %22depends_on%22: %5Bfolly, wangle, fizz, sodium, zstd, mvfst%5D,%0A %22steps%22: %5Bbuilder.fb_github_cmake_install(%22proxygen/proxygen%22, %22..%22)%5D,%0A %7D%0A
37e74416a090342c18cfad87df74dd958400145d
Add 'Others' category.
bulb/migrations/0009_add_others_category.py
bulb/migrations/0009_add_others_category.py
Python
0.998649
@@ -0,0 +1,648 @@ +# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0Adef add_categories(apps, schema_editor):%0A Category = apps.get_model('bulb', 'Category')%0A Category.objects.create(code_name=%22others%22, name=%22%D8%A3%D8%AE%D8%B1%D9%89%22)%0A%0Adef remove_categories(apps, schema_editor):%0A Category = apps.get_model('bulb', 'Category')%0A Category.objects.filter(code_name=%22others%22).delete()%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('bulb', '0008_improve_status'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(%0A add_categories,%0A reverse_code=remove_categories),%0A %5D%0A
317160665a58a2e0433202e4605710b09a71de9d
add scrub script to remove solution tags, thanks https://gist.github.com/minrk/3836889
scrub_sols.py
scrub_sols.py
Python
0
@@ -0,0 +1,1339 @@ +#!/usr/bin/env python%0A%22%22%22%0Asimple example script for scrubping solution code cells from IPython notebooks%0AUsage: %60scrub_code.py foo.ipynb %5Bbar.ipynb %5B...%5D%5D%60%0AMarked code cells are scrubbed from the notebook%0A%22%22%22%0A%0Aimport io%0Aimport os%0Aimport sys%0A%0Afrom IPython.nbformat.current import read, write%0A%0Adef scrub_code_cells(nb):%0A scrubbed = 0%0A cells = 0%0A for ws in nb.worksheets:%0A for cell in ws.cells:%0A if cell.cell_type != 'code':%0A continue%0A cells += 1%0A # scrub cells marked with initial '# Solution' comment%0A # any other marker will do, or it could be unconditional%0A if cell.input.startswith(%22# Solution%22):%0A cell.input = u'# Solution goes here'%0A scrubbed += 1%0A cell.outputs = %5B%5D%0A%0A print%0A print(%22scrubbed %25i/%25i code cells from notebook %25s%22 %25 (scrubbed, cells, nb.metadata.name))%0A%0Aif __name__ == '__main__':%0A for ipynb in sys.argv%5B1:%5D:%0A print(%22scrubbing %25s%22 %25 ipynb)%0A with io.open(ipynb, encoding='utf8') as f:%0A nb = read(f, 'json')%0A scrub_code_cells(nb)%0A base, ext = os.path.splitext(ipynb)%0A new_ipynb = %22%25s_blank%25s%22 %25 (base, ext)%0A with io.open(new_ipynb, 'w', encoding='utf8') as f:%0A write(nb, f, 'json')%0A print(%22wrote %25s%22 %25 new_ipynb)%0A
3bafceba383125475d5edb895bc9d88b0dfc5042
Add status to Role
project/apps/api/migrations/0093_role_status.py
project/apps/api/migrations/0093_role_status.py
Python
0.000001
@@ -0,0 +1,481 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.9.4 on 2016-03-05 23:28%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0Aimport django_fsm%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('api', '0092_auto_20160305_1514'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='role',%0A name='status',%0A field=django_fsm.FSMIntegerField(choices=%5B(0, b'New')%5D, default=0),%0A ),%0A %5D%0A
4735ee97aa36920e811edc450d8b6e8a09b5caf5
add utility for explode bam
iron/utilities/explode_bam.py
iron/utilities/explode_bam.py
Python
0.000001
@@ -0,0 +1,1905 @@ +#!/usr/bin/python%0Aimport sys, argparse%0Afrom subprocess import Popen, PIPE%0Afrom SamBasics import SamStream%0Afrom multiprocessing import cpu_count, Pool%0Adef main():%0A parser = argparse.ArgumentParser(description=%22Break a bam into evenly sized chunks print the number of chunks%22,formatter_class=argparse.ArgumentDefaultsHelpFormatter)%0A parser.add_argument('input',help=%22Use - for STDIN sam or directly name bamfile%22)%0A parser.add_argument('output_base',help=%22output base name myout will go to myout.1.bam%22)%0A parser.add_argument('-k',type=int,required=True,help=%22Number per chunk%22)%0A parser.add_argument('--threads',type=int,default=cpu_count(),help=%22Number of threads%22)%0A args = parser.parse_args()%0A %0A inf = None%0A if args.input == '-':%0A inf = sys.stdin%0A else: %0A cmd = %22samtools view -h %22+args.input%0A p = Popen(cmd.split(),stdout=PIPE)%0A inf = p.stdout%0A%0A v = SamStream(inf)%0A buffer = %5B%5D%0A i = 0%0A if args.threads %3E 1:%0A poo= Pool(processes=args.threads)%0A while True:%0A e = v.read_entry()%0A if not e: break%0A buffer.append(e)%0A if len(buffer) %3E= args.k:%0A i+=1%0A if args.threads %3E 1:%0A poo.apply_async(do_output,args=(buffer,v.header%5B:%5D,i,args.output_base))%0A else:%0A do_output(buffer,v.header%5B:%5D,i,args.output_base)%0A buffer = %5B%5D%0A if len(buffer) %3E 0:%0A i+=1%0A if args.threads %3E 1:%0A poo.apply_async(do_output,args=(buffer,v.header%5B:%5D,i,args.output_base))%0A else:%0A do_output(buffer,v.header%5B:%5D,i,args.output_base)%0A if args.threads %3E 1:%0A poo.close()%0A poo.join()%0A%0A if args.input != '-':%0A p.communicate()%0A print i%0A%0Adef do_output(buffer,header,i,output_base):%0A of = open(output_base+'.'+str(i)+'.bam','w')%0A cmd = 'samtools view - -Sb'%0A p = Popen(cmd.split(),stdin=PIPE,stdout=of)%0A for e in header:%0A p.stdin.write(e)%0A for e in buffer:%0A p.stdin.write(e)%0A p.communicate()%0A of.close()%0A%0Aif __name__==%22__main__%22:%0A main()%0A
ebd62eac70d5589b0b7f593009024868f981e658
Add actor with behavior similar to old-style Delay
calvin/actorstore/systemactors/std/ClassicDelay.py
calvin/actorstore/systemactors/std/ClassicDelay.py
Python
0.000002
@@ -0,0 +1,1706 @@ +# -*- coding: utf-8 -*-%0A%0A# Copyright (c) 2015 Ericsson AB%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Afrom calvin.actor.actor import Actor, ActionResult, manage, condition, guard%0A%0A%0Aclass ClassicDelay(Actor):%0A %22%22%22%0A After first token, pass on token once every 'delay' seconds.%0A Input :%0A token: anything%0A Outputs:%0A token: anything%0A %22%22%22%0A%0A @manage(%5B'delay'%5D)%0A def init(self, delay=0.1):%0A self.delay = delay%0A self.use('calvinsys.events.timer', shorthand='timer')%0A self.timer = None%0A%0A def setup(self):%0A self.timer = self%5B'timer'%5D.repeat(self.delay)%0A%0A def will_migrate(self):%0A self.timer.cancel()%0A%0A def did_migrate(self):%0A self.setup()%0A%0A @condition(%5B'token'%5D, %5B'token'%5D)%0A @guard(lambda self, _: not self.timer)%0A def start_timer(self, token):%0A self.setup()%0A return ActionResult(production=(token, ))%0A%0A @condition(%5B'token'%5D, %5B'token'%5D)%0A @guard(lambda self, _: self.timer and self.timer.triggered)%0A def passthrough(self, token):%0A self.timer.ack()%0A return ActionResult(production=(token, ))%0A%0A action_priority = (start_timer, passthrough)%0A requires = %5B'calvinsys.events.timer'%5D%0A
a88cf930a5c0e67a7aef93ab5c4eb705ad7aad32
Fix ‘permissions_classes’ typos
kolibri/core/lessons/tests.py
kolibri/core/lessons/tests.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.test import TestCase # Create your tests here.
Python
0.999755
@@ -61,65 +61,4 @@ als%0A -%0Afrom django.test import TestCase%0A%0A# Create your tests here.%0A
e363aac46c9a5b607c7b32bcc5546c5a2728d750
Add migration which fixes missing message IDs.
climate_data/migrations/0029_auto_20170628_1527.py
climate_data/migrations/0029_auto_20170628_1527.py
Python
0
@@ -0,0 +1,953 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.2 on 2017-06-28 15:27%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0A%0Afrom datetime import timedelta%0A%0A%0A# noinspection PyUnusedLocal%0Adef add_message_id_to_reading(apps, schema_editor):%0A # noinspection PyPep8Naming%0A Reading = apps.get_model('climate_data', 'Reading')%0A # noinspection PyPep8Naming%0A Message = apps.get_model('climate_data', 'Message')%0A%0A for reading in Reading.objects.filter(message_id=None):%0A reading.message = Message.objects.filter(%0A station=reading.station,%0A arrival_time__gt=reading.read_time,%0A arrival_time__lt=(reading.read_time + timedelta(minutes=52))%0A ).first()%0A reading.save()%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('climate_data', '0028_auto_20170627_1914'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(add_message_id_to_reading),%0A %5D%0A
840bc57e7120ae67e84c1c7bca94cfef34c8d2a8
Copy old script from @erinspace which added identifiers to existing preprints.
scripts/add_missing_identifiers_to_preprints.py
scripts/add_missing_identifiers_to_preprints.py
Python
0
@@ -0,0 +1,2439 @@ +import sys%0Aimport time%0Aimport logging%0Afrom scripts import utils as script_utils%0Afrom django.db import transaction%0A%0Afrom website.app import setup_django%0Afrom website.identifiers.utils import request_identifiers_from_ezid, parse_identifiers%0A%0Asetup_django()%0Alogger = logging.getLogger(__name__)%0A%0A%0Adef add_identifiers_to_preprints(dry=True):%0A from osf.models import PreprintService%0A%0A preprints_without_identifiers = PreprintService.objects.filter(identifiers__isnull=True)%0A logger.info('About to add identifiers to %7B%7D preprints.'.format(preprints_without_identifiers.count()))%0A%0A for preprint in preprints_without_identifiers:%0A logger.info('Saving identifier for preprint %7B%7D from source %7B%7D'.format(preprint._id, preprint.provider.name))%0A%0A if not dry:%0A ezid_response = request_identifiers_from_ezid(preprint)%0A id_dict = parse_identifiers(ezid_response)%0A preprint.set_identifier_values(doi=id_dict%5B'doi'%5D, ark=id_dict%5B'ark'%5D)%0A preprint.save()%0A%0A doi = preprint.get_identifier('doi')%0A assert preprint._id.upper() in doi.value%0A%0A logger.info('Created DOI %7B%7D for Preprint with guid %7B%7D from service %7B%7D'.format(doi.value, preprint._id, preprint.provider.name))%0A time.sleep(1)%0A else:%0A logger.info('Dry run - would have created identifier for preprint %7B%7D from service %7B%7D'.format(preprint._id, preprint.provider.name))%0A%0A logger.info('Finished Adding identifiers to %7B%7D preprints.'.format(preprints_without_identifiers.count()))%0A%0A%0Adef main(dry=True):%0A # Start a transaction that will be rolled back if any exceptions are un%0A add_identifiers_to_preprints(dry)%0A if dry:%0A # When running in dry mode force the transaction to rollback%0A raise Exception('Dry Run complete -- not actually saved')%0A%0A%0Aif __name__ == '__main__':%0A dry = '--dry' in sys.argv%0A if not dry:%0A # If we're not running in dry mode log everything to a file%0A script_utils.add_file_logger(logger, __file__)%0A%0A # Allow setting the log level just by appending the level to the command%0A if '--debug' in sys.argv:%0A logger.setLevel(logging.DEBUG)%0A elif '--warning' in sys.argv:%0A logger.setLevel(logging.WARNING)%0A elif '--info' in sys.argv:%0A logger.setLevel(logging.INFO)%0A elif '--error' in sys.argv:%0A logger.setLevel(logging.ERROR)%0A%0A # Finally run the migration%0A main(dry=dry)%0A
3a9ec86e4b996912b1a47abe07c70116be14b3f8
Create hello.py
hello.py
hello.py
Python
0.999503
@@ -0,0 +1,18 @@ +print %22Hello all%22%0A
d73b2108358c8aa43509b6def6879fc70b138fb5
add objects
nefi2_main/nefi2/view/test2.py
nefi2_main/nefi2/view/test2.py
Python
0.000006
@@ -0,0 +1,1560 @@ +from PyQt4 import QtGui, QtCore%0Aimport sys%0A%0Aclass Main(QtGui.QMainWindow):%0A def __init__(self, parent = None):%0A super(Main, self).__init__(parent)%0A%0A # main button%0A self.addButton = QtGui.QPushButton('button to add other widgets')%0A self.addButton.clicked.connect(self.addWidget)%0A%0A # scroll area widget contents - layout%0A self.scrollLayout = QtGui.QFormLayout()%0A%0A # scroll area widget contents%0A self.scrollWidget = QtGui.QWidget()%0A self.scrollWidget.setLayout(self.scrollLayout)%0A%0A # scroll area%0A self.scrollArea = QtGui.QScrollArea()%0A self.scrollArea.setWidgetResizable(True)%0A self.scrollArea.setWidget(self.scrollWidget)%0A%0A # main layout%0A self.mainLayout = QtGui.QVBoxLayout()%0A%0A # add all main to the main vLayout%0A self.mainLayout.addWidget(self.addButton)%0A self.mainLayout.addWidget(self.scrollArea)%0A%0A # central widget%0A self.centralWidget = QtGui.QWidget()%0A self.centralWidget.setLayout(self.mainLayout)%0A%0A # set central widget%0A self.setCentralWidget(self.centralWidget)%0A%0A def addWidget(self):%0A self.scrollLayout.addRow(Test())%0A%0A%0Aclass Test(QtGui.QWidget):%0A def __init__( self, parent=None):%0A super(Test, self).__init__(parent)%0A%0A self.pushButton = QtGui.QPushButton('I am in Test widget')%0A%0A layout = QtGui.QHBoxLayout()%0A layout.addWidget(self.pushButton)%0A self.setLayout(layout)%0A%0A%0A%0Aapp = QtGui.QApplication(sys.argv)%0AmyWidget = Main()%0AmyWidget.show()%0Aapp.exec_()
98bf1c67b95d40888e26068015e4abf1b94d0640
add ddns state module
salt/states/ddns.py
salt/states/ddns.py
Python
0
@@ -0,0 +1,2909 @@ +'''%0ADynamic DNS updates.%0A====================%0A%0AEnsure a DNS record is present or absent utilizing RFC 2136%0Atype dynamic updates. Requires dnspython module.%0A%0A.. code-block:: yaml%0A%0A webserver:%0A ddns.present:%0A - zone: example.com%0A - ttl: 60%0A'''%0A%0A%0Adef __virtual__():%0A return 'ddns' if 'ddns.update' in __salt__ else False%0A%0A%0Adef present(name, zone, ttl, data, rdtype='A'):%0A '''%0A Ensures that the named DNS record is present with the given ttl.%0A%0A name%0A The host portion of the DNS record, e.g., 'webserver'%0A%0A zone%0A The zone to check/update%0A%0A ttl%0A TTL for the record%0A%0A data%0A Data for the DNS record. E.g., the IP addres for an A record.%0A%0A rdtype%0A DNS resource type. Default 'A'.%0A '''%0A ret = %7B'name': name,%0A 'changes': %7B%7D,%0A 'result': False,%0A 'comment': ''%7D%0A%0A if __opts__%5B'test'%5D:%0A ret%5B'result'%5D = None%0A ret%5B'comment'%5D = '%7B0%7D record %22%7B1%7D%22 will be updated'.format(rdtype, name)%0A return ret%0A%0A status = __salt__%5B'ddns.update'%5D(zone, name, ttl, rdtype, data)%0A%0A if status is None:%0A ret%5B'result'%5D = True%0A ret%5B'comment'%5D = '%7B0%7D record %22%7B1%7D%22 already present with ttl of %7B2%7D'.format(%0A rdtype, name, ttl)%0A elif status:%0A ret%5B'result'%5D = True%0A ret%5B'comment'%5D = 'Updated %7B0%7D record for %22%7B1%7D%22'.format(rdtype, name)%0A ret%5B'changes'%5D = %7B'name': name,%0A 'zone': zone,%0A 'ttl': ttl,%0A 'rdtype': rdtype,%0A 'data': data%0A %7D%0A else:%0A ret%5B'result'%5D = False%0A ret%5B'comment'%5D = 'Failed to create or update %7B0%7D record for %22%7B1%7D%22'.format(rdtype, name)%0A return ret%0A%0A%0Adef absent(name, zone, data=None, rdtype=None):%0A '''%0A Ensures that the named DNS record is absent.%0A%0A name%0A The host portion of the DNS record, e.g., 'webserver'%0A%0A zone%0A The zone to check%0A%0A data%0A Data for the DNS record. E.g., the IP addres for an A record. If omitted,%0A all records matching name (and rdtype, if provided) will be purged.%0A%0A rdtype%0A DNS resource type. If omitted, all types will be purged.%0A '''%0A ret = %7B'name': name,%0A 'changes': %7B%7D,%0A 'result': False,%0A 'comment': ''%7D%0A%0A if __opts__%5B'test'%5D:%0A ret%5B'result'%5D = None%0A ret%5B'comment'%5D = '%7B0%7D record %22%7B1%7D%22 will be deleted'.format(rdtype, name)%0A return ret%0A%0A status = __salt__%5B'ddns.delete'%5D(zone, name, rdtype, data)%0A%0A if status is None:%0A ret%5B'result'%5D = True%0A ret%5B'comment'%5D = 'No matching DNS record(s) present'%0A elif status:%0A ret%5B'result'%5D = True%0A ret%5B'comment'%5D = 'Deleted DNS record(s)'%0A ret%5B'changes'%5D = True%0A else:%0A ret%5B'result'%5D = False%0A ret%5B'comment'%5D = 'Failed to delete DNS record(s)'%0A return ret%0A%0A
4ea54e24948356b039ad961c857e685c30bb0737
Solve task #500
500.py
500.py
Python
0.999999
@@ -0,0 +1,606 @@ +class Solution(object):%0A def findWords(self, words):%0A %22%22%22%0A :type words: List%5Bstr%5D%0A :rtype: List%5Bstr%5D%0A %22%22%22%0A rows = %5B'qwertyuiop', 'asdfghjkl', 'zxcvbnm'%5D%0A def inOneRow(word):%0A mask = %5B0, 0, 0%5D%0A for i in range(len(rows)):%0A for ch in word:%0A if ch in rows%5Bi%5D:%0A mask%5Bi%5D = 1%0A return sum(mask) == 1%0A %0A ans = %5B%5D%0A for word in words:%0A wordl = word.lower()%0A if inOneRow(wordl):%0A ans.append(word)%0A return ans%0A %0A
ce3eef2c749f7d9f7bcd1d439497121e89e3727b
Add notification
devicehive/notification.py
devicehive/notification.py
Python
0.000001
@@ -0,0 +1,958 @@ +from devicehive.api_object import ApiObject%0A%0A%0Aclass Notification(ApiObject):%0A %22%22%22Notification class.%22%22%22%0A%0A DEVICE_ID_KEY = 'deviceId'%0A ID_KEY = 'id'%0A NOTIFICATION_KEY = 'notification'%0A PARAMETERS_KEY = 'parameters'%0A TIMESTAMP_KEY = 'timestamp'%0A%0A def __init__(self, transport, token, notification):%0A ApiObject.__init__(self, transport)%0A self._token = token%0A self._device_id = notification%5Bself.DEVICE_ID_KEY%5D%0A self._id = notification%5Bself.ID_KEY%5D%0A self._notification = notification%5Bself.NOTIFICATION_KEY%5D%0A self._parameters = notification%5Bself.PARAMETERS_KEY%5D%0A self._timestamp = notification%5Bself.TIMESTAMP_KEY%5D%0A%0A def device_id(self):%0A return self._device_id%0A%0A def id(self):%0A return self._id%0A%0A def notification(self):%0A return self._notification%0A%0A def parameters(self):%0A return self._parameters%0A%0A def timestamp(self):%0A return self._timestamp%0A
bf53cbe08d4908ab37063f35bdcb97cb682823b0
Support setting explicit mute value for Panasonic Viera TV (#13954)
homeassistant/components/media_player/panasonic_viera.py
homeassistant/components/media_player/panasonic_viera.py
""" Support for interface with a Panasonic Viera TV. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.panasonic_viera/ """ import logging import voluptuous as vol from homeassistant.components.media_player import ( SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PREVIOUS_TRACK, SUPPORT_TURN_ON, SUPPORT_TURN_OFF, SUPPORT_PLAY, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET, MEDIA_TYPE_URL, SUPPORT_PLAY_MEDIA, SUPPORT_STOP, SUPPORT_VOLUME_STEP, MediaPlayerDevice, PLATFORM_SCHEMA) from homeassistant.const import ( CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON, STATE_UNKNOWN, CONF_PORT) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['panasonic_viera==0.3.1', 'wakeonlan==1.0.0'] _LOGGER = logging.getLogger(__name__) CONF_MAC = 'mac' DEFAULT_NAME = 'Panasonic Viera TV' DEFAULT_PORT = 55000 SUPPORT_VIERATV = SUPPORT_PAUSE | SUPPORT_VOLUME_STEP | \ SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \ SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \ SUPPORT_TURN_OFF | SUPPORT_PLAY | \ SUPPORT_PLAY_MEDIA | SUPPORT_STOP PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_MAC): cv.string, vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, }) # pylint: disable=unused-argument def setup_platform(hass, config, add_devices, discovery_info=None): """Set up the Panasonic Viera TV platform.""" from panasonic_viera import RemoteControl mac = config.get(CONF_MAC) name = config.get(CONF_NAME) port = config.get(CONF_PORT) if discovery_info: _LOGGER.debug('%s', discovery_info) name = discovery_info.get('name') host = discovery_info.get('host') port = discovery_info.get('port') remote = RemoteControl(host, port) add_devices([PanasonicVieraTVDevice(mac, name, remote)]) return True host = config.get(CONF_HOST) remote = RemoteControl(host, port) add_devices([PanasonicVieraTVDevice(mac, name, remote)]) return True class PanasonicVieraTVDevice(MediaPlayerDevice): """Representation of a Panasonic Viera TV.""" def __init__(self, mac, name, remote): """Initialize the Panasonic device.""" import wakeonlan # Save a reference to the imported class self._wol = wakeonlan self._mac = mac self._name = name self._muted = False self._playing = True self._state = STATE_UNKNOWN self._remote = remote self._volume = 0 def update(self): """Retrieve the latest data.""" try: self._muted = self._remote.get_mute() self._volume = self._remote.get_volume() / 100 self._state = STATE_ON except OSError: self._state = STATE_OFF def send_key(self, key): """Send a key to the tv and handles exceptions.""" try: self._remote.send_key(key) self._state = STATE_ON except OSError: self._state = STATE_OFF return False return True @property def name(self): """Return the name of the device.""" return self._name @property def state(self): """Return the state of the device.""" return self._state @property def volume_level(self): """Volume level of the media player (0..1).""" return self._volume @property def is_volume_muted(self): """Boolean if volume is currently muted.""" return self._muted @property def supported_features(self): """Flag media player features that are supported.""" if self._mac: return SUPPORT_VIERATV | SUPPORT_TURN_ON return SUPPORT_VIERATV def turn_on(self): """Turn on the media player.""" if self._mac: self._wol.send_magic_packet(self._mac) self._state = STATE_ON def turn_off(self): """Turn off media player.""" if self._state != STATE_OFF: 
self.send_key('NRC_POWER-ONOFF') self._state = STATE_OFF def volume_up(self): """Volume up the media player.""" self.send_key('NRC_VOLUP-ONOFF') def volume_down(self): """Volume down media player.""" self.send_key('NRC_VOLDOWN-ONOFF') def mute_volume(self, mute): """Send mute command.""" self.send_key('NRC_MUTE-ONOFF') def set_volume_level(self, volume): """Set volume level, range 0..1.""" volume = int(volume * 100) try: self._remote.set_volume(volume) self._state = STATE_ON except OSError: self._state = STATE_OFF def media_play_pause(self): """Simulate play pause media player.""" if self._playing: self.media_pause() else: self.media_play() def media_play(self): """Send play command.""" self._playing = True self.send_key('NRC_PLAY-ONOFF') def media_pause(self): """Send media pause command to media player.""" self._playing = False self.send_key('NRC_PAUSE-ONOFF') def media_next_track(self): """Send next track command.""" self.send_key('NRC_FF-ONOFF') def media_previous_track(self): """Send the previous track command.""" self.send_key('NRC_REW-ONOFF') def play_media(self, media_type, media_id, **kwargs): """Play media.""" _LOGGER.debug("Play media: %s (%s)", media_id, media_type) if media_type == MEDIA_TYPE_URL: try: self._remote.open_webpage(media_id) except (TimeoutError, OSError): self._state = STATE_OFF else: _LOGGER.warning("Unsupported media_type: %s", media_type) def media_stop(self): """Stop playback.""" self.send_key('NRC_CANCEL-ONOFF')
Python
0
@@ -4203,34 +4203,25 @@ elf. -send_key('NRC_POWER-ONOFF' +_remote.turn_off( )%0A @@ -4339,34 +4339,26 @@ elf. -send_key('NRC_VOLUP-ONOFF' +_remote.volume_up( )%0A%0A @@ -4440,36 +4440,28 @@ elf. -send_key('NRC_VOLDOWN-ONOFF' +_remote.volume_down( )%0A%0A @@ -4542,33 +4542,29 @@ elf. -send_key('NRC_MUTE-ONOFF' +_remote.set_mute(mute )%0A%0A @@ -5121,33 +5121,27 @@ elf. -send_key('NRC_PLAY-ONOFF' +_remote.media_play( )%0A%0A @@ -5269,34 +5269,28 @@ elf. -send_key('NRC_PAUSE-ONOFF' +_remote.media_pause( )%0A%0A @@ -5376,31 +5376,33 @@ elf. -send_key('NRC_FF-ONOFF' +_remote.media_next_track( )%0A%0A @@ -5500,32 +5500,37 @@ elf. -send_key('NRC_REW-ONOFF' +_remote.media_previous_track( )%0A%0A
a02a46752d954c29a65bf8bc5b88fa3545315175
Add unit tests for timestr()
lib/svtplay_dl/tests/utils.py
lib/svtplay_dl/tests/utils.py
Python
0
@@ -0,0 +1,701 @@ +#!/usr/bin/python%0A# ex:ts=4:sw=4:sts=4:et%0A# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*-%0A%0A# The unittest framwork doesn't play nice with pylint:%0A# pylint: disable-msg=C0103%0A%0Afrom __future__ import absolute_import%0Aimport unittest%0Aimport svtplay_dl.utils%0A%0Aclass timestrTest(unittest.TestCase):%0A def test_1(self):%0A self.assertEqual(svtplay_dl.utils.timestr(1), %2200:00:00,00%22)%0A%0A def test_100(self):%0A self.assertEqual(svtplay_dl.utils.timestr(100), %2200:00:00,10%22)%0A%0A def test_3600(self):%0A self.assertEqual(svtplay_dl.utils.timestr(3600), %2200:00:03,60%22)%0A%0A def test_3600000(self):%0A self.assertEqual(svtplay_dl.utils.timestr(3600000), %2201:00:00,00%22)%0A
46c036cad1323d55c61f546b5cd6174739ab1b42
add helper functions for data persistence
ws/data_persistence.py
ws/data_persistence.py
Python
0.000001
@@ -0,0 +1,1987 @@ +# https://github.com/usc-isi-i2/dig-etl-engine/issues/92%0A%0Aimport json%0Aimport threading%0Aimport os%0Aimport codecs%0A%0A%0A# 1.acquire file write lock%0A# 2.write to file.new%0A# 3.acquire replace lock%0A# 4.rename file to file.old%0A# 5.rename file.new to file%0A# 6.release replace lock and write lock%0A# 7.remove file.old%0Adef dump_data(data, file_path, write_lock, replace_lock):%0A new_path = file_path + '.new'%0A old_path = file_path + '.old'%0A%0A try:%0A write_lock.acquire()%0A with codecs.open(new_path, 'w') as f:%0A f.write(data)%0A%0A replace_lock.acquire()%0A # https://docs.python.org/2/library/os.html#os.rename%0A # On Unix, if dst exists and is a file,%0A # it will be replaced silently if the user has permission.%0A os.rename(file_path, old_path)%0A os.rename(new_path, file_path)%0A os.remove(old_path)%0A except Exception as e:%0A print e%0A finally:%0A write_lock.release()%0A replace_lock.release()%0A%0A%0A# when starting:%0A# if only file exists, correct.%0A# if both file.new and file.old exist, ignore file.old and rename file.new to file (shut down in the middle of replacing, file.new is complete)%0A# if both file.new and file exist, ignore file.new (shut down in the middle of generating file.new).%0A# if only file.new exists, error (user deletion)%0A# if only file.old exists, error (user deletion)%0A# if three of them exists, error (user operation, system error%0Adef read_data(file_path):%0A new_path = file_path + '.new'%0A old_path = file_path + '.old'%0A has_file = os.path.exists(file_path)%0A has_new = os.path.exists(new_path)%0A has_old = os.path.exists(old_path)%0A%0A if has_file and not has_new and not has_old:%0A pass%0A elif not has_file and has_old and has_new:%0A os.remove(old_path)%0A os.rename(new_path, file_path)%0A elif has_file and not has_old and has_new:%0A os.remove(new_path)%0A else:%0A return%0A%0A with codecs.open(file_path, 'r') as f:%0A return f.read()%0A
327b74d5e0328e6415520b907e4c43ed8cb54cf2
add sample that fetches the graph and renders it as an ascii tree
examples/fetchDebianDependencyGraph.py
examples/fetchDebianDependencyGraph.py
Python
0
@@ -0,0 +1,1529 @@ +#!/usr/bin/python%0Aimport sys%0Afrom pyArango.connection import *%0Afrom pyArango.graph import *%0Afrom asciitree import *%0A%0Aconn = Connection(username=%22root%22, password=%22%22)%0A%0Adb = conn%5B%22ddependencyGrahp%22%5D%0A%0Aif not db.hasGraph('debian_dependency_graph'):%0A raise Exception(%22didn't find the debian dependency graph, please import first!%22)%0A%0AddGraph = db.graphs%5B'debian_dependency_graph'%5D%0A%0AgraphQuery = '''%0AFOR package, depends, path IN%0A 1..2 ANY%0A @startPackage Depends RETURN path%0A'''%0A%0AstartNode = sys.argv%5B1%5D%0A%0AbindVars = %7B %22startPackage%22: %22packages/%22 + startNode %7D%0A%0AqueryResult = db.AQLQuery(graphQuery, bindVars=bindVars, rawResults=True)%0A%0A# sub iterateable object to build up the tree for draw_tree:%0Aclass Node(object):%0A def __init__(self, name, children):%0A self.name = name%0A self.children = children%0A %0A def getChild(self, searchName):%0A for child in self.children:%0A if child.name == searchName:%0A return child%0A return None%0A %0A def __str__(self):%0A return self.name%0A%0Adef iteratePath(path, depth, currentNode):%0A pname = path%5Bdepth%5D%5B'name'%5D%0A subNode = currentNode.getChild(pname)%0A if subNode == None:%0A subNode = Node(pname, %5B%5D)%0A currentNode.children.append(subNode)%0A if len(path) %3E depth + 1:%0A iteratePath(path, depth + 1, subNode)%0A%0A# Now we fold the paths substructure into the tree:%0ArootNode = Node(startNode, %5B%5D)%0Afor path in queryResult:%0A p = path%5B'edges'%5D%0A iteratePath(p, 0, rootNode)%0A%0Aprint draw_tree(rootNode)%0A
8e73752e9242796a933d3566eb4a5e4470f13d5e
Create sequences.py
sequences.py
sequences.py
Python
0.000009
@@ -0,0 +1,1455 @@ +import random%0Aimport sys%0Aimport os%0A%0A# User input%0Auser_input = input(%22Type in 5 integers of any sequence separated by commas. Example: 1,2,3,4,5: %22)%0Alist_input = user_input.split(%22,%22)%0A# Convert numbered strings into integers in list%0Alist_int = list(map(int, list_input))%0A%0A# Check Arithmetic Sequence%0Alist_arith = list_int%5B1%5D - list_int%5B0%5D%0Aif list_int%5B1%5D == list_int%5B0%5D + list_arith and list_int%5B2%5D == list_int%5B1%5D + list_arith:%0A print(%22Arithmetic Sequence%22)%0A%0A# Check Geometric Sequence%0Aif list_int%5B1%5D == list_int%5B0%5D * 2 and list_int%5B2%5D == list_int%5B1%5D * 2 and list_int%5B3%5D == list_int%5B2%5D * 2:%0A print(%22This is a Geometric Sequence%22)%0A%0A# Check Quadratic Sequence%0Alist_quad1 = list_int%5B1%5D - list_int%5B0%5D%0Alist_quad2 = list_int%5B2%5D - list_int%5B1%5D%0Alist_diff = list_quad2 - list_quad1%0Aif list_int%5B1%5D == list_int%5B0%5D + list_quad1 and list_int%5B2%5D == list_int%5B1%5D + list_quad2:%0A print(%22This is a Quadratic Sequence%22)%0A%0A# Check Cubic Sequence%0Acub1 = list_int%5B1%5D - list_int%5B0%5D # Subtraction Process%0Acub2 = list_int%5B2%5D - list_int%5B1%5D # Subtraction Process%0Acub3 = list_int%5B3%5D - list_int%5B2%5D # Subtraction Process%0A%0Acub_r1 = cub3 - cub2 # Subtraction Process%0Acub_r2 = cub2 - cub1 # Subtraction Process%0A%0A# %22if%22 comparison%0Aif cub_r1 == cub_r2:%0A print(%22This is a Cubic Sequence%22)%0A%0A# Check Fibonacci Sequence%0Afib_chck1 = list_int%5B0%5D + list_int%5B1%5D%0Afib_chck2 = list_int%5B1%5D + list_int%5B2%5D%0A%0Aif list_int%5B2%5D == fib_chck1 and list_int%5B3%5D == fib_chck2:%0A print(%22Fibonacci Sequence%22)%0A
d6db1d0b81211a80884131b10212195ab38f99ad
Fix a conflict with IPython.
dosagelib/output.py
dosagelib/output.py
# -*- coding: iso-8859-1 -*- # Copyright (C) 2004-2005 Tristan Seligmann and Jonathan Jacobs # Copyright (C) 2012-2014 Bastian Kleineidam import time import sys import os import threading import traceback import codecs from .ansicolor import Colorizer lock = threading.Lock() def get_threadname(): """Return name of current thread.""" return threading.current_thread().getName() class Output(object): """Print output with context, indentation and optional timestamps.""" def __init__(self, stream=None): """Initialize context and indentation.""" self.context = None self.level = 0 self.timestamps = False if stream is None: if hasattr(sys.stdout, "encoding") and sys.stdout.encoding: self.encoding = sys.stdout.encoding else: self.encoding = 'utf-8' if sys.version_info[0] >= 3: stream = sys.stdout.buffer else: stream = sys.stdout stream = codecs.getwriter(self.encoding)(stream, 'replace') self.setStream(stream) def setStream(self, stream): """Initialize context and indentation.""" self.stream = Colorizer(stream) def info(self, s, level=0): """Write an informational message.""" self.write(s, level=level) def debug(self, s, level=2): """Write a debug message.""" self.write(s, level=level, color='white') def warn(self, s): """Write a warning message.""" self.write(u"WARN: %s" % s, color='bold;yellow') def error(self, s, tb=None): """Write an error message.""" self.write(u"ERROR: %s" % s, color='light;red') #if tb is not None: # self.write('Traceback (most recent call last):', 1) def exception(self, s): """Write error message with traceback info.""" self.error(s) type, value, tb = sys.exc_info() self.writelines(traceback.format_stack(), 1) self.writelines(traceback.format_tb(tb)[1:], 1) self.writelines(traceback.format_exception_only(type, value), 1) def write(self, s, level=0, color=None): """Write message with indentation, context and optional timestamp.""" if level > self.level: return if self.timestamps: timestamp = time.strftime(u'%H:%M:%S ') else: timestamp = u'' with lock: if self.context: self.stream.write(u'%s%s> ' % (timestamp, self.context)) elif self.context is None: self.stream.write(u'%s%s> ' % (timestamp, get_threadname())) self.stream.write(u'%s' % s, color=color) try: text_type = unicode except NameError: text_type = str self.stream.write(text_type(os.linesep)) self.stream.flush() def writelines(self, lines, level=0): """Write multiple messages.""" for line in lines: for line in line.rstrip(u'\n').split(u'\n'): self.write(line.rstrip(u'\n'), level=level) out = Output()
Python
0.00166
@@ -11,18 +11,13 @@ ng: -iso-8859-1 +utf-8 -*- @@ -126,16 +126,63 @@ ineidam%0A +# Copyright (C) 2005-2016 Tobias Gruetzmacher%0A%0A import t @@ -288,16 +288,17 @@ orizer%0A%0A +%0A lock = t @@ -314,16 +314,17 @@ Lock()%0A%0A +%0A def get_ @@ -923,32 +923,37 @@ if -sys.version_info%5B0%5D %3E= 3 +hasattr(sys.stdout, 'buffer') :%0A @@ -1693,32 +1693,32 @@ ror message.%22%22%22%0A + self.wri @@ -1760,101 +1760,8 @@ ed') -%0A #if tb is not None:%0A # self.write('Traceback (most recent call last):', 1) %0A%0A
ea40075f8924c2d61da8f92fe9ecf74045bbe6cc
add script to convert Tandem Repeats Finder dat format to bed format required for STRetch
scripts/TRFdat_to_bed.py
scripts/TRFdat_to_bed.py
Python
0
@@ -0,0 +1,1770 @@ +#!/usr/bin/env python%0Afrom argparse import (ArgumentParser, FileType)%0A%0Adef parse_args():%0A %22Parse the input arguments, use '-h' for help%22%0A parser = ArgumentParser(description='Convert Tandem Repeat Finder (TRF) dat file to bed format with repeat units for microsatellite genotyping')%0A parser.add_argument(%0A '--dat', type=str, required=True,%0A help='Input dat file produced by Tandem Repeat Finder (TRF) using the -d option')%0A parser.add_argument(%0A '--bed', type=str, required=True,%0A help='Output bed file containing genomic locations and repeat units of microsatellites.')%0A%0A return parser.parse_args()%0A%0A### Main%0Adef main():%0A # Parse command line arguments%0A args = parse_args()%0A datfile = args.dat%0A bedfile = args.bed%0A%0A with open(bedfile, 'w') as bed:%0A chrom = %22%22%0A with open(datfile, 'r') as dat:%0A for line in dat:%0A splitline = line.split()%0A if line.startswith(%22Sequence:%22):%0A chrom = line.split()%5B1%5D%0A else:%0A # Catch index errors when line is blank%0A try:%0A # Check if in header sequence (all non-header lines start with an int: start pos)%0A try:%0A int(splitline%5B0%5D)%0A except ValueError:%0A continue%0A start = splitline%5B0%5D%0A end = splitline%5B1%5D%0A motif = splitline%5B13%5D%0A copynum = splitline%5B3%5D%0A bed.write('%5Ct'.join(%5Bchrom,start,end,motif,copynum%5D) + '%5Cn')%0A except IndexError:%0A pass%0A%0Aif __name__ == '__main__':%0A main()%0A
272eceebbc44bd7dc44498233a7dca5ab9c2bdd8
add iplookup
scripts/iplookup.py
scripts/iplookup.py
Python
0.000001
@@ -0,0 +1,991 @@ +import sys%0Aimport json%0Aimport numpy as np%0Aimport pandas as pd%0Aimport geoip2.database%0A%0A%0Aif len(sys.argv) != 3:%0A sys.exit('Please specify a GeoLite DB and an ip table.')%0A%0Areader = geoip2.database.Reader(sys.argv%5B1%5D)%0A%0A%0Adef get_name(entry, lang):%0A if hasattr(entry, 'names') and lang in entry.names:%0A return entry.names%5Blang%5D%0A%0A return 'unknown'%0A%0A%0Adef get_location(addr):%0A response = reader.city(addr)%0A city = get_name(response.city, 'en')%0A lat = response.location.latitude%0A lng = response.location.longitude%0A return (city, lat, lng)%0A%0A%0Aip = np.loadtxt(sys.argv%5B2%5D, dtype=str)%0A%0Alocations = map(get_location, ip)%0A%0Aseries = pd.Series(locations)%0Aucounts = series.value_counts()%0A%0Ainfo = %5B%5D%0Afor location, count in zip(ucounts.keys(), ucounts.get_values()):%0A if location:%0A info.append(%7B'city_name': location%5B0%5D,%0A 'lat': location%5B1%5D,%0A 'long': location%5B-1%5D,%0A 'nb_visits': count%7D)%0A%0Aprint json.dumps(info)%0A
462cdfaf93f23e227b8da44e143a5ff9e8c047be
test futil for files
tests/test_futil.py
tests/test_futil.py
Python
0
@@ -0,0 +1,582 @@ +%22%22%22Run doctests in pug.nlp.futil.%22%22%22%0Afrom __future__ import print_function, absolute_import%0A%0Aimport doctest%0A%0Aimport pug.nlp.futil%0A%0Afrom unittest import TestCase%0A%0A%0Aclass DoNothingTest(TestCase):%0A %22%22%22A useless TestCase to encourage Django unittests to find this module and run %60load_tests()%60.%22%22%22%0A def test_example(self):%0A self.assertTrue(True)%0A%0A%0Adef load_tests(loader, tests, ignore):%0A %22%22%22Run doctests for the pug.nlp.futil module%22%22%22%0A tests.addTests(doctest.DocTestSuite(pug.nlp.futil, optionflags=doctest.ELLIPSIS %7C doctest.NORMALIZE_WHITESPACE))%0A return tests%0A
a1039c2e38243b64d2027621aa87ee020636f23b
Add initial test for routes.
tests/test_views.py
tests/test_views.py
Python
0
@@ -0,0 +1,560 @@ +#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport os%0Aimport sys%0Asys.path.insert(0, os.path.abspath('..'))%0A%0Aimport website%0Aimport unittest%0Aimport tempfile%0A%0Aclass FPOTestCase(unittest.TestCase):%0A%0A def test_homepage(self):%0A self.app = website.app.test_client()%0A resp = self.app.get('/')%0A self.assertEqual(resp.status_code, 200)%0A%0A def test_admin(self):%0A self.app = website.app.test_client()%0A resp = self.app.get('/admin/')%0A self.assertEqual(resp.status_code, 200)%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
6cebbd302556469dd4231d6252ec29c5d7c1165c
add script to convert data from Rime/luna-pinyin
data/convertdict.py
data/convertdict.py
Python
0
@@ -0,0 +1,1938 @@ +#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0Aimport sys%0A%0A%0Adef uniq(seq): # Dave Kirby%0A # Order preserving%0A seen = set()%0A return %5Bx for x in seq if x not in seen and not seen.add(x)%5D%0A%0A%0Adef pinyin(word):%0A N = len(word)%0A pos = 0%0A result = %5B%5D%0A while pos %3C N:%0A for i in range(N, pos, -1):%0A frag = word%5Bpos:i%5D%0A if frag in chdict:%0A result.append(sorted(chdict%5Bfrag%5D, key=lambda x: -prob.get((frag, x), 0))%5B0%5D)%0A break%0A pos = i%0A return ' '.join(result)%0A%0Achdict = %7B%7D%0Aprob = %7B%7D%0Astarted = False%0A%0A# Pass 1: Load Pinyin and its probability from dict%0Awith open('luna_pinyin.dict.yaml', 'r', encoding='utf-8') as f:%0A for ln in f:%0A ln = ln.strip()%0A if started and ln and ln%5B0%5D != '#':%0A l = ln.split('%5Ct')%0A w, c = l%5B0%5D, l%5B1%5D%0A if w in chdict:%0A chdict%5Bw%5D.append(c)%0A else:%0A chdict%5Bw%5D = %5Bc%5D%0A if len(l) == 3:%0A if l%5B2%5D%5B-1%5D == '%25':%0A p = float(l%5B2%5D%5B:-1%5D) / 100%0A else:%0A p = float(l%5B2%5D)%0A prob%5B(w, c)%5D = p%0A elif ln == '...':%0A started = True%0A%0Aessay = %7B%7D%0A# Pass 2: Load more words and word frequency%0Awith open('essay.txt', 'r', encoding='utf-8') as f:%0A for ln in f:%0A word, freq = ln.strip().split('%5Ct')%0A # add-one smoothing%0A essay%5Bword%5D = int(freq) + 1%0A if len(word) %3E 1:%0A c = pinyin(word)%0A if word not in chdict:%0A chdict%5Bword%5D = %5Bc%5D%0A%0A# Pass 3: Calculate (word, pinyin) pair frequency%0Afinal = %5B%5D%0Afor word, codes in chdict.items():%0A for code in codes:%0A freq = max(int(essay.get(word, 1) * prob.get((word, code), 1)), 1)%0A final.append((word, code, freq))%0Afinal.sort()%0A%0Awith open('pinyin_rime.txt', 'w', encoding='utf-8') as f:%0A for item in final:%0A f.write('%25s%5Ct%25s%5Ct%25s%5Cn' %25 item)%0A
41e3d696967b523d0d031a0a17d18c9804f455ee
Change G+ default type
djangocms_blog/settings.py
djangocms_blog/settings.py
# -*- coding: utf-8 -*- from django.conf import settings from meta_mixin import settings as meta_settings BLOG_IMAGE_THUMBNAIL_SIZE = getattr(settings, 'BLOG_IMAGE_THUMBNAIL_SIZE', { 'size': '120x120', 'crop': True, 'upscale': False }) BLOG_IMAGE_FULL_SIZE = getattr(settings, 'BLOG_IMAGE_FULL_SIZE', { 'size': '640x120', 'crop': True, 'upscale': False }) BLOG_TAGCLOUD_MIN = getattr(settings, 'BLOG_TAGCLOUD_MIN', 1) BLOG_TAGCLOUD_MAX = getattr(settings, 'BLOG_TAGCLOUD_MAX', 10) BLOG_PAGINATION = getattr(settings, 'BLOG_PAGINATION', 10) BLOG_LATEST_POSTS = getattr(settings, 'BLOG_LATEST_POSTS', 5) BLOG_POSTS_LIST_TRUNCWORDS_COUNT = getattr(settings, 'BLOG_POSTS_LIST_TRUNCWORDS_COUNT', 100) BLOG_TYPE = getattr(settings, 'BLOG_TYPE', 'Article') BLOG_FB_TYPE = getattr(settings, 'BLOG_FB_TYPE', 'Article') BLOG_FB_APPID = getattr(settings, 'BLOG_FB_APPID', meta_settings.FB_APPID) BLOG_FB_PROFILE_ID = getattr(settings, 'BLOG_FB_PROFILE_ID', meta_settings.FB_PROFILE_ID) BLOG_FB_PUBLISHER = getattr(settings, 'BLOG_FB_PUBLISHER', meta_settings.FB_PUBLISHER) BLOG_FB_AUTHOR_URL = getattr(settings, 'BLOG_FB_AUTHOR_URL', 'get_author_url') BLOG_FB_AUTHOR = getattr(settings, 'BLOG_FB_AUTHOR', 'get_author_name') BLOG_TWITTER_TYPE = getattr(settings, 'BLOG_TWITTER_TYPE', 'Summary') BLOG_TWITTER_SITE = getattr(settings, 'BLOG_TWITTER_SITE', meta_settings.TWITTER_SITE) BLOG_TWITTER_AUTHOR = getattr(settings, 'BLOG_TWITTER_AUTHOR', 'get_author_twitter') BLOG_GPLUS_TYPE = getattr(settings, 'BLOG_GPLUS_SCOPE_CATEGORY', 'Article') BLOG_GPLUS_AUTHOR = getattr(settings, 'BLOG_GPLUS_AUTHOR', 'get_author_gplus') BLOG_ENABLE_COMMENTS = getattr(settings, 'BLOG_ENABLE_COMMENTS', True) BLOG_USE_PLACEHOLDER = getattr(settings, 'BLOG_USE_PLACEHOLDER', True)
Python
0
@@ -1537,31 +1537,28 @@ CATEGORY', ' -Article +Blog ')%0ABLOG_GPLU
ab2b2c6f12e2e5ec53ac6d140919a343a74b7e3c
Update migration
django_afip/migrations/0017_receipt_issued_date.py
django_afip/migrations/0017_receipt_issued_date.py
Python
0
@@ -0,0 +1,623 @@ +# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.2 on 2017-06-10 13:33%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('afip', '0016_auto_20170529_2012'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='receipt',%0A name='issued_date',%0A field=models.DateField(%0A help_text=(%0A 'Can diverge up to 5 days for good, or 10 days otherwise'%0A ),%0A verbose_name='issued date',%0A ),%0A ),%0A %5D%0A
a52dd9d66ff7d9a29f6d635e5ca1a2a0584c267b
Add rosetta utils
rosetta_utils.py
rosetta_utils.py
Python
0.0015
@@ -0,0 +1,379 @@ +# From: https://github.com/mbi/django-rosetta/issues/50%0A# Gunicorn may work with --reload option but it needs%0A# https://pypi.python.org/pypi/inotify package for performances%0A%0Afrom django.dispatch import receiver%0Afrom rosetta.signals import post_save%0A%0Aimport time%0Aimport os%0A%0A@receiver(post_save)%0Adef restart_server(sender, **kwargs):%0A os.system(%22./gunicorn.sh stop%22)%0A pass%0A%0A
8410b027987f088b86989898b4fade5b0960886a
Solve problem 2
problem002.py
problem002.py
Python
0.999999
@@ -0,0 +1,205 @@ +#!/usr/bin/env python3%0A%0Adef fibs(maxnumber):%0A fib1, fib2 = 1, 2%0A while fib1 %3C maxnumber:%0A yield fib1%0A fib1, fib2 = fib2, fib1 + fib2%0A%0Aprint(sum(f for f in fibs(4000000) if f %25 2 == 0))%0A
278920272efd7ab959d7cad5b5f7d6c17935c7e6
Add problem 35, circular primes
problem_35.py
problem_35.py
Python
0.000126
@@ -0,0 +1,945 @@ +from math import sqrt%0Afrom time import time%0A%0APRIME_STATUS = %7B%7D%0A%0A%0Adef is_prime(n):%0A if n == 2:%0A return True%0A if n %25 2 == 0 or n %3C= 1:%0A return False%0A for i in range(3, int(sqrt(n))+1, 2):%0A if n %25 i == 0:%0A return False%0A return True%0A%0A%0Adef check_prime_circles(num):%0A circles = %5B%5D%0A s = str(num)%0A for i in range(len(s)):%0A circle = int(s%5Bi:%5D + s%5B0:i%5D)%0A circles.append(circle)%0A if circle not in PRIME_STATUS:%0A PRIME_STATUS%5Bcircle%5D = is_prime(circle)%0A if not PRIME_STATUS%5Bcircle%5D:%0A return False%0A return True%0A%0A%0Adef main():%0A circular_primes = %5B%5D%0A for num in range(2, 1000000):%0A if check_prime_circles(num):%0A circular_primes.append(num)%0A print 'Circular primes:', circular_primes%0A print 'Amount of circular primes:', len(circular_primes)%0A%0Aif __name__ == '__main__':%0A t = time()%0A main()%0A print 'Time:', time() - t%0A
dad430fd56b8be22bd1a3b9773f9948c3e305883
Add unit tests for lazy strings
stringlike/test/lazy_tests.py
stringlike/test/lazy_tests.py
Python
0.000001
@@ -0,0 +1,1367 @@ +import sys%0Aimport os%0Asys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..')))%0A%0A%0Afrom stringlike.lazy import LazyString, CachedLazyString%0Afrom unittest import main, TestCase%0A%0A%0Aclass TestLazyString(TestCase):%0A def test_equality(self):%0A self.assertEqual(LazyString(lambda: 'abc'), 'abc')%0A%0A def test_delay(self):%0A self.evaluateCount = 0%0A%0A def func():%0A self.evaluateCount += 1%0A return 'abc'%0A %0A lazyString = LazyString(func)%0A self.assertEqual(self.evaluateCount, 0)%0A%0A self.assertEqual(lazyString, 'abc')%0A self.assertEqual(self.evaluateCount, 1)%0A%0A self.assertEqual(lazyString, 'abc')%0A self.assertEqual(self.evaluateCount, 2)%0A%0A%0Aclass TestCachedLazyString(TestCase):%0A def test_equality(self):%0A self.assertEqual(CachedLazyString(lambda: 'abc'), 'abc')%0A%0A def test_delay(self):%0A self.evaluateCount = 0%0A%0A def func():%0A self.evaluateCount += 1%0A return 'abc'%0A %0A cachedLazyString = CachedLazyString(func)%0A self.assertEqual(self.evaluateCount, 0)%0A%0A self.assertEqual(cachedLazyString, 'abc')%0A self.assertEqual(self.evaluateCount, 1)%0A%0A self.assertEqual(cachedLazyString, 'abc')%0A self.assertEqual(self.evaluateCount, 1)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
458d2e55de4db6c9f72758b745245301ebd02f48
Add solution 100
100_to_199/euler_100.py
100_to_199/euler_100.py
Python
0.998911
@@ -0,0 +1,1201 @@ +#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A'''%0AProblem 100%0AIf a box contains twenty-one coloured discs, composed of fifteen blue discs and six red discs, and two discs were taken at random, it can be seen that the probability of taking two blue discs, P(BB) = (15/21)%C3%97(14/20) = 1/2.%0AThe next such arrangement, for which there is exactly 50%25 chance of taking two blue discs at random, is a box containing eighty-five blue discs and thirty-five red discs.%0ABy finding the first arrangement to contain over 1012 = 1,000,000,000,000 discs in total, determine the number of blue discs that the box would contain.%0A'''%0A%0Afrom itertools import count%0Afrom math import sqrt, ceil%0A%0A%0A# https://oeis.org/A001542%0Adef get_nominator(n):%0A a = ceil((((3 + 2 * sqrt(2)) ** n) - ((3 - 2 * sqrt(2)) ** n)) / (2 * sqrt(2)))%0A return a%0A%0A%0A# Actually Diophantine pairs.. https://oeis.org/A011900%0Adef p100(): # Answer: 756872327473, 0.01s%0A L = 10 ** 12%0A n = 1%0A for i in count(1):%0A np = get_nominator(i // 2) # pattern is repeated%0A res = n * (n+np)%0A n = n + np%0A if res * 1.414 %3E L: # 15/21, 85/120 is around 1.414xxxx%0A print(res)%0A break%0A return%0A%0A%0Ap100()%0A
1966225450e36921c283e46dfb896e86a8d41c94
load tasks in project via __setup__ instead of onload function, to return tasks via get_doc
erpnext/projects/doctype/project/project.py
erpnext/projects/doctype/project/project.py
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt

from __future__ import unicode_literals
import frappe

from frappe.utils import flt, getdate
from frappe import _

from frappe.model.document import Document

class Project(Document):
	def get_feed(self):
		return '{0}: {1}'.format(_(self.status), self.project_name)

	def onload(self):
		"""Load project tasks for quick view"""
		for task in frappe.get_all("Task", "*", {"project": self.name}, order_by="exp_start_date asc"):
			self.append("tasks", {
				"title": task.subject,
				"status": task.status,
				"start_date": task.exp_start_date,
				"end_date": task.exp_end_date,
				"description": task.description,
				"task_id": task.name
			})

	def validate(self):
		self.validate_dates()
		self.sync_tasks()

	def validate_dates(self):
		if self.expected_start_date and self.expected_end_date:
			if getdate(self.expected_end_date) < getdate(self.expected_start_date):
				frappe.throw(_("Expected End Date can not be less than Expected Start Date"))

	def sync_tasks(self):
		"""sync tasks and remove table"""
		if self.flags.dont_sync_tasks: return

		task_names = []
		for t in self.tasks:
			if t.task_id:
				task = frappe.get_doc("Task", t.task_id)
			else:
				task = frappe.new_doc("Task")
				task.project = self.name

			task.update({
				"subject": t.title,
				"status": t.status,
				"exp_start_date": t.start_date,
				"exp_end_date": t.end_date,
				"description": t.description,
			})

			task.flags.ignore_links = True
			task.flags.from_project = True
			task.save(ignore_permissions = True)
			task_names.append(task.name)

		# delete
		for t in frappe.get_all("Task", ["name"], {"project": self.name, "name": ("not in", task_names)}):
			frappe.delete_doc("Task", t.name)

		self.tasks = []

	def update_percent_complete(self):
		total = frappe.db.sql("""select count(*) from tabTask where project=%s""", self.name)[0][0]
		if total:
			completed = frappe.db.sql("""select count(*) from tabTask where
				project=%s and status in ('Closed', 'Cancelled')""", self.name)[0][0]
			frappe.db.set_value("Project", self.name, "percent_complete",
				int(float(completed) / total * 100))

	def update_costing(self):
		total_cost = frappe.db.sql("""select sum(total_costing_amount) as costing_amount,
			sum(total_billing_amount) as billing_amount, sum(total_expense_claim) as expense_claim,
			min(act_start_date) as start_date, max(act_end_date) as end_date, sum(actual_time) as time
			from `tabTask` where project = %s""", self.name, as_dict=1)[0]

		self.total_costing_amount = total_cost.costing_amount
		self.total_billing_amount = total_cost.billing_amount
		self.total_expense_claim = total_cost.expense_claim
		self.actual_start_date = total_cost.start_date
		self.actual_end_date = total_cost.end_date
		self.actual_time = total_cost.time
		self.gross_margin = flt(total_cost.billing_amount) - flt(total_cost.costing_amount)

		if self.total_billing_amount:
			self.per_gross_margin = (self.gross_margin / flt(self.total_billing_amount)) *100

	def update_purchase_costing(self):
		self.total_purchase_cost = frappe.db.sql("""select sum(amount) as cost
			from `tabPurchase Invoice Item` where project_name = %s and docstatus=1 """,
			self.name, as_dict=1)[0].cost or 0

@frappe.whitelist()
def get_cost_center_name(project_name):
	return frappe.db.get_value("Project", project_name, "cost_center")
Python
0.000001
@@ -398,14 +398,17 @@ def -onload +__setup__ (sel @@ -453,16 +453,34 @@ view%22%22%22%0A +%09%09self.tasks = %5B%5D%0A %09%09for ta
c421024bfd1660685bb6ec6cb84a0369244627c5
add celery module
service_mapper/celery.py
service_mapper/celery.py
Python
0.000001
@@ -0,0 +1,580 @@ +from __future__ import absolute_import%0A%0Aimport os%0A%0Afrom celery import Celery%0A%0Afrom django.conf import settings%0A%0A# set the default Django settings module for the 'celery' program.%0Aos.environ.setdefault('DJANGO_SETTINGS_MODULE', 'service_mapper.settings')%0A%0Aapp = Celery('service_mapper')%0A%0A# Using a string here means the worker will not have to%0A# pickle the object when using Windows.%0Aapp.config_from_object('django.conf:settings')%0Aapp.autodiscover_tasks(lambda: settings.INSTALLED_APPS)%0A%0A%[email protected](bind=True)%0Adef debug_task(self):%0A print('Request: %7B0!r%7D'.format(self.request))%0A
2eb05eb7d42f1b14191cccba2563c2105fabaed1
Add processing module
processing.py
processing.py
Python
0.000001
@@ -0,0 +1,749 @@ +#!/usr/bin/env python%0A%22%22%22%0AProcessing routines for the waveFlapper case.%0A%0A%22%22%22%0A%0Aimport foampy%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0A%0Awidth_2d = 0.1%0Awidth_3d = 3.66%0A%0Adef plot_force():%0A %22%22%22Plots the streamwise force on the paddle over time.%22%22%22%0A %0Adef plot_moment():%0A data = foampy.load_forces_moments()%0A i = 10%0A t = data%5B%22time%22%5D%5Bi:%5D%0A m = data%5B%22moment%22%5D%5B%22pressure%22%5D%5B%22z%22%5D + data%5B%22moment%22%5D%5B%22viscous%22%5D%5B%22z%22%5D%0A m = m%5Bi:%5D*width_3d/width_2d%0A plt.figure()%0A plt.plot(t, m)%0A plt.xlabel(%22t (s)%22)%0A plt.ylabel(%22Flapper moment (Nm)%22)%0A print(%22Max moment from CFD =%22, m.max(), %22Nm%22)%0A print(%22Theoretical max moment (including inertia) =%22, 5500*3.3, %22Nm%22) %0A plt.show()%0A %0Aif __name__ == %22__main__%22:%0A plot_moment()%0A
df0e285b6f8465eb273af50c242299c5601fa09f
Add a new example
examples/sanic_aiomysql_with_global_pool.py
examples/sanic_aiomysql_with_global_pool.py
Python
0.000102
@@ -0,0 +1,2105 @@ +# encoding: utf-8%0A%22%22%22%0AYou need the aiomysql%0A%22%22%22%0Aimport asyncio%0Aimport os%0A%0Aimport aiomysql%0Aimport uvloop%0Afrom sanic import Sanic%0Afrom sanic.response import json%0A%0Adatabase_name = os.environ%5B'DATABASE_NAME'%5D%0Adatabase_host = os.environ%5B'DATABASE_HOST'%5D%0Adatabase_user = os.environ%5B'DATABASE_USER'%5D%0Adatabase_password = os.environ%5B'DATABASE_PASSWORD'%5D%0Aapp = Sanic()%0Aasyncio.set_event_loop_policy(uvloop.EventLoopPolicy())%0A%0A%0Aasync def get_pool(*args, **kwargs):%0A %22%22%22%0A the first param in *args is the global instance ,%0A so we can store our connection pool in it .%0A and it can be used by different request%0A :param args:%0A :param kwargs:%0A :return:%0A %22%22%22%0A args%5B0%5D.pool = %7B%0A %22aiomysql%22: await aiomysql.create_pool(host=database_host, user=database_user, password=database_password,%0A db=database_name,%0A maxsize=5)%7D%0A async with args%5B0%5D.pool%5B'aiomysql'%5D.acquire() as conn:%0A async with conn.cursor() as cur:%0A await cur.execute('DROP TABLE IF EXISTS sanic_polls')%0A await cur.execute(%22%22%22CREATE TABLE sanic_polls (%0A id serial primary key,%0A question varchar(50),%0A pub_date timestamp%0A );%22%22%22)%0A for i in range(0, 100):%0A await cur.execute(%22%22%22INSERT INTO sanic_polls%0A (id, question, pub_date) VALUES (%7B%7D, %7B%7D, now())%0A %22%22%22.format(i, i))%0A%0A%[email protected](%22/%22)%0Aasync def test():%0A result = %5B%5D%0A data = %7B%7D%0A async with app.pool%5B'aiomysql'%5D.acquire() as conn:%0A async with conn.cursor() as cur:%0A await cur.execute(%22SELECT question, pub_date FROM sanic_polls%22)%0A async for row in cur:%0A result.append(%7B%22question%22: row%5B0%5D, %22pub_date%22: row%5B1%5D%7D)%0A if result or len(result) %3E 0:%0A data%5B'data'%5D = res%0A return json(data)%0A%0A%0Aif __name__ == '__main__':%0A app.run(host=%22127.0.0.1%22, workers=4, port=12000, before_start=get_pool)%0A
e7b6aef4db85c777463d2335107145b60b678ae2
Create a new tour example
examples/tour_examples/maps_introjs_tour.py
examples/tour_examples/maps_introjs_tour.py
Python
0.000012
@@ -0,0 +1,1770 @@ +from seleniumbase import BaseCase%0A%0A%0Aclass MyTourClass(BaseCase):%0A%0A def test_google_maps_tour(self):%0A self.open(%22https://www.google.com/maps/@42.3598616,-71.0912631,15z%22)%0A self.wait_for_element(%22#searchboxinput%22)%0A self.wait_for_element(%22#minimap%22)%0A self.wait_for_element(%22#zoom%22)%0A%0A self.create_tour(theme=%22introjs%22)%0A self.add_tour_step(%22Welcome to Google Maps!%22,%0A title=%22%E2%9C%85 SeleniumBase Tours %F0%9F%8C%8E%22)%0A self.add_tour_step(%22Type in a location here.%22, %22#searchboxinput%22,%0A title=%22Search Box%22)%0A self.add_tour_step(%22Then click here to show it on the map.%22,%0A %22#searchbox-searchbutton%22, alignment=%22bottom%22)%0A self.add_tour_step(%22Or click here to get driving directions.%22,%0A %22#searchbox-directions%22, alignment=%22bottom%22)%0A self.add_tour_step(%22Use this button to switch to Satellite view.%22,%0A %22#minimap div.widget-minimap%22, alignment=%22right%22)%0A self.add_tour_step(%22Click here to zoom in.%22, %22#widget-zoom-in%22,%0A alignment=%22left%22)%0A self.add_tour_step(%22Or click here to zoom out.%22, %22#widget-zoom-out%22,%0A alignment=%22left%22)%0A self.add_tour_step(%22Use the Menu button to see more options.%22,%0A %22.searchbox-hamburger-container%22, alignment=%22right%22)%0A self.add_tour_step(%22Or click here to see more Google apps.%22,%0A '%5Btitle=%22Google apps%22%5D', alignment=%22left%22)%0A self.add_tour_step(%22Thanks for using SeleniumBase Tours!%22,%0A title=%22%F0%9F%9A%83 End of Guided Tour %F0%9F%9A%83%22)%0A self.export_tour(filename=%22google_maps_introjs_tour.js%22)%0A self.play_tour()%0A
8ddc9333513a2e900ff61b6d2904db3e58635bb9
add initial self_publish version
elm_self_publish.py
elm_self_publish.py
Python
0
@@ -0,0 +1,2133 @@ +#! /usr/bin/env python%0Afrom __future__ import print_function%0A%0Aimport sys%0Aimport json%0Aimport shutil%0Aimport argparse%0A%0Adef copy_package(location, destination):%0A shutil.copytree(location, destination)%0A%0Adef package_name(url):%0A %22%22%22 get the package name from a github url %22%22%22%0A%0A project = url.split('/')%5B-1%5D.split('.')%5B0%5D%0A user = url.split('/')%5B-2%5D%0A%0A return %7B%0A %22project%22: project,%0A %22user%22: user%0A %7D%0A%0Adef self_publish(package_location, destination=%22.%22, quiet=False):%0A %22%22%22 package_location should be the local package to install%0A %22%22%22%0A%0A elm_package_file = %22%7Blocation%7D/elm-package.json%22.format(location=package_location)%0A exact_deps_file = %22%7Bdestination%7D/elm-stuff/exact-dependencies.json%22.format(%0A destination=destination,%0A location=package_location%0A )%0A%0A with open(elm_package_file) as f:%0A elm_package = json.load(f)%0A%0A%0A package_details = package_name(elm_package%5B'repository'%5D)%0A version = elm_package%5B'version'%5D%0A%0A%0A place = package_details%5B'user'%5D + '/' + package_details%5B'project'%5D%0A%0A%0A copy_package(package_location, '%7Bdestination%7D/elm-stuff/packages/%7Bplace%7D/%7Bversion%7D'.format(%0A place=place,%0A version=version,%0A destination=destination%0A ))%0A%0A%0A with open(exact_deps_file) as f:%0A data = f.read()%0A package_info = %7B%7D%0A%0A if data:%0A package_info = json.loads(data)%0A%0A with open(exact_deps_file, 'w') as f:%0A package_info%5Bplace%5D = version%0A json.dump(package_info, f, sort_keys=False, indent=4)%0A%0A with open(elm_package_file, 'w') as f:%0A elm_package%5B'dependencies'%5D%5Bplace%5D = version%0A json.dump(elm_package, f, sort_keys=False, indent=4)%0A%0A%0Adef main():%0A%0A parser = argparse.ArgumentParser(description='Publish a local package into your project')%0A%0A parser.add_argument('--quiet', '-q', action='store_true', help='don%5C't print anything', default=False)%0A%0A parser.add_argument('package_location')%0A parser.add_argument('destination')%0A args = parser.parse_args()%0A%0A self_publish(args.package_location, args.destination, quiet=args.quiet)%0A%0Aif __name__ == '__main__':%0A main()%0A
a004611ceb3402c95675a749eb9a3db764c97e51
Move cython_build_ext command to utils.distutils and put it to setup.cfg
edgedb/lang/common/distutils.py
edgedb/lang/common/distutils.py
Python
0
@@ -0,0 +1,802 @@ +##%0A# Copyright (c) 2014 Sprymix Inc.%0A# All rights reserved.%0A#%0A# See LICENSE for details.%0A##%0A%0A%0Afrom distutils.command import build_ext as _build_ext%0A%0A%0Aclass cython_build_ext(_build_ext.build_ext):%0A def __init__(self, *args, **kwargs):%0A self._ctor_args = args%0A self._ctor_kwargs = kwargs%0A self._cython = None%0A%0A def __getattribute__(self, name):%0A cython = object.__getattribute__(self, '_cython')%0A if cython is None:%0A from Cython.Distutils import build_ext%0A%0A _ctor_args = object.__getattribute__(self, '_ctor_args')%0A _ctor_kwargs = object.__getattribute__(self, '_ctor_kwargs')%0A cython = build_ext(*_ctor_args, **_ctor_kwargs)%0A object.__setattr__(self, '_cython', cython)%0A return getattr(cython, name)%0A
d05de03f258c215ce0a23023e5c15b057fbf7283
add missing import
s2plib/fusion.py
s2plib/fusion.py
# Copyright (C) 2015, Carlo de Franchis <[email protected]>
# Copyright (C) 2015, Gabriele Facciolo <[email protected]>
# Copyright (C) 2015, Enric Meinhardt <[email protected]>
# Copyright (C) 2015, Julien Michel <[email protected]>

from __future__ import print_function
import os
import sys
import shutil
import numpy as np

from osgeo import gdal
gdal.UseExceptions()

from s2plib.config import cfg


def average_if_close(x, threshold):
    """
    """
    if np.nanmax(x) - np.nanmin(x) > threshold:
        return np.nan
    else:
        return np.nanmedian(x)


def merge_n(output, inputs, offsets, averaging='average_if_close', threshold=1):
    """
    Merge n images of equal sizes by taking the median/mean/min/max pixelwise.

    Args:
        inputs: list of paths to the input images
        output: path to the output image
        averaging: string containing the name of a function that accepts
            1D arrays. It is applied to 1D slices of the stack of images along
            the last axis. Possible values are, for instance np.min, np.max,
            np.mean, np.median and their nanproof counterparts, ie np.nanmin,
            np.nanmax, np.nanmean, np.nanmedian
    """
    assert(len(inputs) == len(offsets))

    # get input images size
    if inputs:
        f = gdal.Open(inputs[0])
        w, h = f.RasterXSize, f.RasterYSize
        f = None  # this is the gdal way of closing files

    # read input images and apply offsets
    x = np.empty((h, w, len(inputs)))
    for i, img in enumerate(inputs):
        f = gdal.Open(img)
        x[:, :, i] = f.GetRasterBand(1).ReadAsArray() - offsets[i]
        f = None
        if cfg['debug']:
            common.rasterio_write('{}_registered.tif'.format(os.path.splitext(img)[0]),
                                  x[:, :, i] + np.mean(offsets))

    # apply the averaging operator
    if averaging.startswith(('np.', 'numpy.')):
        avg = np.apply_along_axis(getattr(sys.modules['numpy'], averaging.split('.')[1]),
                                  axis=2, arr=x)
    elif averaging == 'average_if_close':
        avg = np.apply_along_axis(average_if_close, 2, x, threshold)

    # add the mean offset
    avg += np.mean(offsets)

    # write the average to output
    if inputs:
        shutil.copy(inputs[0], output)  # copy an input file to get the metadata
        f = gdal.Open(output, gdal.GA_Update)
        f.GetRasterBand(1).WriteArray(avg)  # update the output file content
        f = None
Python
0.000001
@@ -445,16 +445,42 @@ ort cfg%0A +from s2plib import common%0A %0A%0Adef av
bc235b15bbeacf7fee7e1d23a5d94b6271e33e41
Add initial code
rpsls.py
rpsls.py
Python
0.000003
@@ -0,0 +1,1999 @@ +#!/usr/bin/python%0A%0Afrom collections import OrderedDict%0Afrom random import choice, seed%0Afrom sys import exit%0A%0A%0AWEAPONS = OrderedDict(%5B%0A ('rock', 1),%0A ('paper', 2),%0A ('scissors', 3),%0A ('lizard', 5),%0A ('spock', 4)%0A%5D)%0A%0A%0AEXPLANATIONS = %7B%0A 'lizardlizard': 'Lizard equals lizard',%0A 'lizardpaper': 'Lizard eats paper',%0A 'lizardrock': 'Rock crushes lizard',%0A 'lizardscissors': 'Scissors decapitate lizard',%0A 'lizardspock': 'Lizard poisons spock',%0A 'paperpaper': 'Paper equals paper',%0A 'paperrock': 'Paper wraps rock',%0A 'paperscissors': 'Scissors cut paper',%0A 'paperspock': 'Paper disproves Spock',%0A 'rockrock': 'Rock equals rock',%0A 'rockscissors': 'Rock breaks scissors',%0A 'rockspock': 'Spock vapourises rock',%0A 'scissorsscissors': 'Scissors equal scissors',%0A 'scissorsspock': 'Spock breaks scissors',%0A 'spockspock': 'Spock equals Spock'%0A%7D%0A%0A%0Adef do_battle(player_weapon, cpu_weapon):%0A explanation = EXPLANATIONS%5B''.join(sorted(%5Bplayer_weapon, cpu_weapon%5D))%5D%0A result = (WEAPONS%5Bplayer_weapon%5D - WEAPONS%5Bcpu_weapon%5D) %25 5%0A if result == 0:%0A message = 'It%5C's a draw.'%0A elif result %25 2 == 0:%0A message = 'CPU wins!'%0A else:%0A message = 'Player wins!'%0A return '%7B%7D. %7B%7D'.format(explanation, message)%0A%0A%0Adef is_valid_weapon(weapon):%0A return weapon in WEAPONS.keys()%0A%0A%0Adef get_random_weapon():%0A seed()%0A return choice(WEAPONS.keys())%0A%0A%0Adef run():%0A print 'Choose your weapon (%7B%7D), or quit:'.format(', '.join(WEAPONS.keys()))%0A player_weapon = raw_input('%3E ').lower()%0A if player_weapon == 'quit':%0A print 'Thanks for playing.'%0A exit()%0A if not is_valid_weapon(player_weapon):%0A print '%5C'%7B%7D%5C' is not a valid weapon, try again.%5Cn'.format(player_weapon)%0A run()%0A cpu_weapon = get_random_weapon()%0A print '(Player) %7B%7D - vs - %7B%7D (CPU)'.format(player_weapon, cpu_weapon)%0A print '%7B%7D%5Cn'.format(do_battle(player_weapon, cpu_weapon))%0A run()%0A%0A%0Aif __name__ == '__main__':%0A run()%0A%0A%0A
43c74dc2dbe82a30f7a9b6c0403db39eb159fc96
add control panel test for fetch
paystackapi/tests/test_cpanel.py
paystackapi/tests/test_cpanel.py
Python
0
@@ -0,0 +1,691 @@ +import httpretty%0A%0Afrom paystackapi.tests.base_test_case import BaseTestCase%0Afrom paystackapi.cpanel import ControlPanel%0A%0A%0Aclass TestPage(BaseTestCase):%0A%0A @httpretty.activate%0A def test_fetch_payment_session_timeout(self):%0A %22%22%22Method defined to test fetch payment session timeout.%22%22%22%0A httpretty.register_uri(%0A httpretty.get,%0A self.endpoint_url(%22/integration/payment_session_timeout%22),%0A content_type='text/json',%0A body='%7B%22status%22: true, %22message%22: %22Payment session timeout retrieved%22%7D',%0A status=201,%0A )%0A%0A response = ControlPanel.fetch_payment_session_timeout()%0A self.assertTrue(response%5B'status'%5D)%0A
233db6d2decad39c98bf5cbe8b974f93308bea16
Create re.py
python2.7/re.py
python2.7/re.py
Python
0.000001
@@ -0,0 +1,990 @@ +#/usr/bin/python%0Aimport re%0A%0A#Shows how to test if a string matches a regular expression (yes/no) and uses more than one modifier%0Aexpression = re.compile(r%22%5E%5Cw+.+string%22, re.I %7C re.S)%09#compile the expression%0Aif expression.match(%22A Simple String To Test%22):%09%09#See if a string matches it%0A%09print %22Matched%22%0Aelse:%0A%09print %22Did Not Match%22%0A%0A#Splitting with a regular expression%0Ascalar_list = %22item 1, item 2, item 3%22%09#A text string delimitted by comma and variable whitespace%0Aitems = re.split(%22,%5Cs+%22, scalar_list) %09%09#Splitting this up into an array called items%0Aprint items%5B1%5D + %22:%22 + items%5B0%5D%09%09%09%09#printing a couple of the elements%0A%0A#Extraction/parsing%0Aparse_this = %22Text with some digits: 1234 and some hexidecimal deadbeef1337%22%0Aextractions = re.compile(r%22%5B%5E%5Cd%5D+(%5Cd+).+%5Cs(%5B0-9a-f%5D+)$%22)%09%09#Our regex; groups we want in ()'s%0Apeices = extractions.match(parse_this)%09%09%09%09%09%09%09#exec our re and result in peices%0Aprint %22Number: %22 + peices.group(1) + %22 Hex:%22 + peices.group(2)%09#display both extracted groups%0A
d93916b1927f0ae099cee3cf93619d3113db147b
Add small example of basic anomaly detection w/peewee.
examples/anomaly_detection.py
examples/anomaly_detection.py
Python
0
@@ -0,0 +1,1697 @@ +import math%0Afrom peewee import *%0A%0A%0Adb = SqliteDatabase(':memory:')%0A%0Aclass Reg(Model):%0A key = TextField()%0A value = IntegerField()%0A%0A class Meta:%0A database = db%0A%0A%0Adb.create_tables(%5BReg%5D)%0A%0A# Create a user-defined aggregate function suitable for computing the standard%0A# deviation of a series.%[email protected]('stddev')%0Aclass StdDev(object):%0A def __init__(self):%0A self.n = 0%0A self.values = %5B%5D%0A%0A def step(self, value):%0A self.n += 1%0A self.values.append(value)%0A%0A def finalize(self):%0A if self.n %3C 2:%0A return 0%0A mean = sum(self.values) / self.n%0A sqsum = sum((i - mean) ** 2 for i in self.values)%0A return math.sqrt(sqsum / (self.n - 1))%0A%0A%0Avalues = %5B2, 3, 5, 2, 3, 12, 5, 3, 4, 1, 2, 1, -9, 3, 3, 5%5D%0A%0AReg.create_table()%0AReg.insert_many(%5B%7B'key': 'k%2502d' %25 i, 'value': v%7D%0A for i, v in enumerate(values)%5D).execute()%0A%0A# We'll calculate the mean and the standard deviation of the series in a common%0A# table expression, which will then be used by our query to find rows whose%0A# zscore exceeds a certain threshold.%0Acte = (Reg%0A .select(fn.avg(Reg.value), fn.stddev(Reg.value))%0A .cte('stats', columns=('series_mean', 'series_stddev')))%0A%0A# The zscore is defined as the (value - mean) / stddev.%0Azscore = (Reg.value - cte.c.series_mean) / cte.c.series_stddev%0A%0A# Find rows which fall outside of 2 standard deviations.%0Athreshold = 2%0Aquery = (Reg%0A .select(Reg.key, Reg.value, zscore.alias('zscore'))%0A .from_(Reg, cte)%0A .where((zscore %3E= threshold) %7C (zscore %3C= -threshold))%0A .with_cte(cte))%0A%0Afor row in query:%0A print(row.key, row.value, round(row.zscore, 2))%0A%0Adb.close()%0A
b5eaf371bf4b19818a3c339ee86f2411d95115db
Add user and group in ini files
setup.py
setup.py
import os
import sys
import re
import pwd
import grp

# Utility function to read the README file.
# Used for the long_description. It's nice, because now 1) we have a top level
# README file and 2) it's easier to type in the README file than to put a raw
# string in below ...

from setuptools import setup
from setuptools import find_packages
from itertools import chain
from glob import glob

try:
    python_version = sys.version_info
except:
    python_version = (1, 5)

if python_version < (2, 6):
    sys.exit("Shinken require as a minimum Python 2.6.x, sorry")
elif python_version >= (3,):
    sys.exit("Shinken is not yet compatible with Python3k, sorry")

user = 'shinken'
group = 'shinken'

package_data = ['*.py', 'modules/*.py', 'modules/*/*.py']


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

# Define files
if 'win' in sys.platform:
    default_paths = {'var': "c:\\shinken\\var",
                     'share': "c:\\shinken\\var\\share",
                     'etc': "c:\\shinken\\etc",
                     'log': "c:\\shinken\\var",
                     'run': "c:\\shinken\\var",
                     'libexec': "c:\\shinken\\libexec",
                     }
    data_files = []
elif 'linux' in sys.platform or 'sunos5' in sys.platform:
    default_paths = {'var': "/var/lib/shinken/",
                     'share': "/var/lib/shinken/share",
                     'etc': "/etc/shinken",
                     'run': "/var/run/shinken",
                     'log': "/var/log/shinken",
                     'libexec': "/usr/lib/shinken/plugins",
                     }
    data_files = [
        (
            os.path.join('/etc', 'init.d'),
            ['bin/init.d/shinken',
             'bin/init.d/shinken-arbiter',
             'bin/init.d/shinken-broker',
             'bin/init.d/shinken-receiver',
             'bin/init.d/shinken-poller',
             'bin/init.d/shinken-reactionner',
             'bin/init.d/shinken-scheduler',
             ]
        )
    ]
elif 'bsd' in sys.platform or 'dragonfly' in sys.platform:
    default_paths = {'var': "/usr/local/libexec/shinken",
                     'share': "/usr/local/share/shinken",
                     'etc': "/usr/local/etc/shinken",
                     'run': "/var/run/shinken",
                     'log': "/var/log/shinken",
                     'libexec': "/usr/local/libexec/shinken/plugins",
                     }
    data_files = [
        (
            '/usr/local/etc/rc.d',
            ['bin/rc.d/shinken_arbiter',
             'bin/rc.d/shinken_broker',
             'bin/rc.d/shinken_receiver',
             'bin/rc.d/shinken_poller',
             'bin/rc.d/shinken_reactionner',
             'bin/rc.d/shinken_scheduler',
             ]
        )
    ]
else:
    raise "Unsupported platform, sorry"
    data_files = []

## get all files + under-files in etc/ except daemons folder
daemonsini = []
for path, subdirs, files in os.walk('etc'):
    for name in files:
        if 'daemons' in path:
            daemonsini.append(os.path.join(os.path.join(default_paths['etc'], re.sub(r"^(etc\/|etc$)", "", path), name)))
        data_files.append( (os.path.join(default_paths['etc'], re.sub(r"^(etc\/|etc$)", "", path)), [os.path.join(path, name)]) )

#if 'daemons' in path:
#else:
#for name in files:
#print os.path.join(path, name)

paths = ('modules', 'doc', 'inventory', 'cli')
for path, subdirs, files in chain.from_iterable(os.walk(patho) for patho in paths):
    for name in files:
        data_files.append( (os.path.join(default_paths['var'], path), [os.path.join(path, name)]))

for path, subdirs, files in os.walk('share'):
    for name in files:
        data_files.append( (os.path.join(default_paths['share'], re.sub(r"^(share\/|share$)", "", path)), [os.path.join(path, name)]) )

for path, subdirs, files in os.walk('libexec'):
    for name in files:
        data_files.append( (os.path.join(default_paths['libexec'], re.sub(r"^(libexec\/|libexec$)", "", path)), [os.path.join(path, name)]) )

data_files.append( (default_paths['run'], []) )
data_files.append( (default_paths['log'], []) )

# compute scripts
scripts = [ s for s in glob('bin/shinken*') if not s.endswith('.py')]

setup(
    name="Shinken",
    version="2.0-RC8",
    packages=find_packages(),
    package_data={'': package_data},
    description="Shinken is a monitoring tool compatible with Nagios configuration and plugins",
    long_description=read('README.rst'),
    author="Gabes Jean",
    author_email="[email protected]",
    license="GNU Affero General Public License",
    url="http://www.shinken-monitoring.org",
    zip_safe=False,
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: System Administrators',
        'License :: OSI Approved :: GNU Affero General Public License v3',
        'Operating System :: MacOS :: MacOS X',
        'Operating System :: Microsoft :: Windows',
        'Operating System :: POSIX',
        'Programming Language :: Python',
        'Topic :: System :: Monitoring',
        'Topic :: System :: Networking :: Monitoring',
    ],
    scripts=scripts,
    data_files = data_files
)

if not '/var/lib/shinken/' in default_paths['var']:
    for file in daemonsini:
        if not 'modules_dir=' in open(file).read():
            with open(file, "a") as inifile:
                inifile.write("modules_dir=" + default_paths['var'] + "/modules")

paths = (default_paths['run'], default_paths['log'])
uid = pwd.getpwnam(user).pw_uid
gid = grp.getgrnam(group).gr_gid
for path in paths:
    os.chown(path, uid, gid)

print "Shinken setup done"
Python
0
@@ -5691,16 +5691,307 @@ /modules +%5Cn%22)%0A if not 'user=' in open(file).read():%0A with open(file, %22a%22) as inifile:%0A inifile.write(%22user=%22 + user + %22%5Cn%22)%0A if not 'group=' in open(file).read():%0A with open(file, %22a%22) as inifile:%0A inifile.write(%22group=%22 + group + %22%5Cn %22)%0A%0Apath
12b334983be4caf0ba97534b52f928180e31e564
add quick script to release lock
release-lock.py
release-lock.py
Python
0
@@ -0,0 +1,76 @@ +from batch import Lock%0A%0Alock = Lock(key=%22charge-cards-lock%22)%0Alock.release()%0A
687a186bd29eb1bef7a134fa5499c9b4c56abaa6
Create setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,643 @@ +from distutils.core import setup%0Aimport py2exe, os, pygame%0AorigIsSystemDLL = py2exe.build_exe.isSystemDLL%0Adef isSystemDLL(pathname):%0A if os.path.basename(pathname).lower() in %5B%22sdl_ttf.dll%22%5D:%0A return 0%0A return origIsSystemDLL(pathname)%0Apy2exe.build_exe.isSystemDLL = isSystemDLL%0Apygamedir = os.path.split(pygame.base.__file__)%5B0%5D%0Aos.path.join(pygamedir, pygame.font.get_default_font()),%0Aos.path.join(pygamedir, 'SDL.dll'),%0Aos.path.join(pygamedir, 'SDL_ttf.dll')%0A%0Asetup(%0A console=%5B%22pick_a_number.py%22%5D,%0A options=%7B%0A %22py2exe%22:%7B%0A %22packages%22: %5B%22pygame%22%5D%0A %7D%0A %7D%0A)%0A
77ca6d5e6ef7e07ede92fa2b4566a90c31fd7845
Bump grappelli and filebrowser versions.
setup.py
setup.py
from __future__ import with_statement
import os

exclude = ["mezzanine/project_template/dev.db",
           "mezzanine/project_template/local_settings.py"]
exclude = dict([(e, None) for e in exclude])
for e in exclude:
    if e.endswith(".py"):
        try:
            os.remove("%sc" % e)
        except:
            pass
    try:
        with open(e, "r") as f:
            exclude[e] = (f.read(), os.stat(e))
        os.remove(e)
    except:
        pass

from setuptools import setup, find_packages
from mezzanine import __version__ as version

install_requires = [
    "django >= 1.3.1",
    "filebrowser_safe == 0.2.3",
    "grappelli_safe == 0.2.2",
]

try:
    from PIL import Image, ImageOps
except ImportError:
    install_requires += ["pillow"]

try:
    setup(
        name="Mezzanine",
        version=version,
        author="Stephen McDonald",
        author_email="[email protected]",
        description="An open source content management platform built using "
                    "the Django framework.",
        long_description=open("README.rst").read(),
        license="BSD",
        url="http://mezzanine.jupo.org/",
        zip_safe=False,
        include_package_data=True,
        packages=find_packages(),
        install_requires=install_requires,
        entry_points="""
            [console_scripts]
            mezzanine-project=mezzanine.bin.mezzanine_project:create_project
        """,
        classifiers=[
            "Development Status :: 4 - Beta",
            "Environment :: Web Environment",
            "Framework :: Django",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: BSD License",
            "Operating System :: OS Independent",
            "Programming Language :: Python",
            "Topic :: Internet :: WWW/HTTP",
            "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
            "Topic :: Internet :: WWW/HTTP :: WSGI",
            "Topic :: Software Development :: Libraries :: "
            "Application Frameworks",
            "Topic :: Software Development :: Libraries :: Python Modules",
        ])
finally:
    for e in exclude:
        if exclude[e] is not None:
            data, stat = exclude[e]
            try:
                with open(e, "w") as f:
                    f.write(data)
                os.chown(e, stat.st_uid, stat.st_gid)
                os.chmod(e, stat.st_mode)
            except:
                pass
Python
0
@@ -619,17 +619,17 @@ == 0.2. -3 +4 %22,%0A %22 @@ -650,17 +650,17 @@ == 0.2. -2 +4 %22,%0A%5D%0A%0Atr
8dfdcfa0f1d13e810a6e56e0a031f15dbaba3656
Use environment metadata for conditional dependencies
setup.py
setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys

import djangocms_blog

try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

version = djangocms_blog.__version__

if sys.argv[-1] == 'publish':
    os.system('python setup.py sdist upload')
    print("You probably want to also tag the version now:")
    print("  git tag -a %s -m 'version %s'" % (version, version))
    print("  git push --tags")
    sys.exit()

readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')

setup(
    name='djangocms-blog',
    version=version,
    description='A djangoCMS 3 blog application',
    long_description=readme + '\n\n' + history,
    author='Iacopo Spalletti',
    author_email='[email protected]',
    url='https://github.com/nephila/djangocms-blog',
    packages=[
        'djangocms_blog',
    ],
    include_package_data=True,
    install_requires=[
        'django-parler>=1.2',
        'django-cms>=3.0',
        'django-taggit',
        'django-filer',
        'django-select2' if sys.version_info[0]==2 else 'django-select2-py3',
        'pytz',
        'django-taggit-templatetags',
        'django-taggit-autosuggest',
        'django-admin-enhancer',
        'djangocms-text-ckeditor',
        'cmsplugin-filer',
        'django-meta>=0.2',
        'django-meta-mixin>=0.1.1',
        'south>=1.0.1',
    ],
    license="BSD",
    zip_safe=False,
    keywords='djangocms-blog, blog, django, wordpress, multilingual',
    test_suite='cms_helper.run',
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        "Programming Language :: Python :: 2",
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
    ],
)
Python
0
@@ -1064,86 +1064,8 @@ r',%0A - 'django-select2' if sys.version_info%5B0%5D==2 else 'django-select2-py3',%0A @@ -1337,24 +1337,268 @@ .1',%0A %5D,%0A + extras_require=%7B%0A %22:python_version=='3.3'%22: %5B'django-select2-py3'%5D,%0A %22:python_version=='3.4'%22: %5B'django-select2-py3'%5D,%0A %22:python_version=='2.6'%22: %5B'django-select2'%5D,%0A %22:python_version=='2.7'%22: %5B'django-select2'%5D,%0A %7D,%0A license=
2e57e929db19ebd864680d4616eb1bba595f1e57
Create setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,471 @@ +from distutils.core import setup%0Asetup(%0A name = 'fram3w0rk-python',%0A packages = %5B'fram3w0rk-python'%5D,%0A version = '0.5',%0A description = '%22Class%22 effort to unify functions across 30 languages.',%0A author = 'Jonathan Lawton',%0A author_email = '[email protected]',%0A url = 'https://github.com/LawtonSoft/Fram3w0rk-Python',%0A download_url = 'https://github.com/LawtonSoft/Fram3work-Python/tarball/0.1',%0A keywords = %5B'fram3w0rk', 'mvc', 'web'%5D,%0A classifiers = %5B%5D,%0A)%0A
b0184d74d0f186662df8596f511f95e1130bcf20
Add libffi package
rules/libffi.py
rules/libffi.py
Python
0
@@ -0,0 +1,204 @@ +import xyz%0Aimport os%0Aimport shutil%0A%0Aclass Libffi(xyz.BuildProtocol):%0A pkg_name = 'libffi'%0A%0A def configure(self, builder, config):%0A builder.host_lib_configure(config=config)%0A%0Arules = Libffi()%0A
b1d08df29b02c107bbb2f2edc9add0c6f486c530
Add app
app.py
app.py
Python
0.000002
@@ -0,0 +1,494 @@ +# coding: utf-8%0Aimport json%0A%0Aimport flask%0Afrom flask import request%0Aimport telegram%0A%0A__name__ = u'eth0_bot'%0A__author__ = u'Joker_Qyou'%0A__config__ = u'config.json'%0A%0Aapp = flask.Flask(__name__)%0Aapp.debug = False%0A%0Awith open(__config__, 'r') as cfr:%0A config = json.loads(cfr.read())%0A%0Abot = telegram.Bot(token=token_info)%0Abot.setWebhook(u'%25(server)s/%25(token)s' %25 config)%0A%[email protected](u'/%25s' %25 config.get('token').split(':')%5B-1%5D)%0Adef webhook():%0A ''' WebHook API func '''%0A print request.POST%0A%0A
69f787a69e400b69fa4aef2e49f6f03781304dae
Update setup.py.
setup.py
setup.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 Alexander Shorin
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
#

from astm.version import __version__

try:
    from setuptools import setup, find_packages
except ImportError:
    from distutils.core import setup
    # http://wiki.python.org/moin/Distutils/Cookbook/AutoPackageDiscovery
    import os

    def is_package(path):
        return (
            os.path.isdir(path) and
            os.path.isfile(os.path.join(path, '__init__.py'))
        )

    def find_packages(path='.', base=""):
        """ Find all packages in path """
        packages = {}
        for item in os.listdir(path):
            dir = os.path.join(path, item)
            if is_package( dir ):
                if base:
                    module_name = "%(base)s.%(item)s" % vars()
                else:
                    module_name = item
                packages[module_name] = dir
                packages.update(find_packages(dir, module_name))
        return packages


setup(
    name = 'astm',
    version = __version__,
    description = 'Python implementation of ASTM E1381/1394 protocol.',

    author = 'Alexander Shorin',
    author_email = '[email protected]',
    license = 'BSD',
    url = 'http://code.google.com/p/python-astm',

    install_requires = [],
    test_suite = 'astm.tests',
    zip_safe = True,

    classifiers = [
        'Development Status :: 4 - Beta',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Scientific/Engineering :: Medical Science Apps.'
    ],

    packages = find_packages(),
)
Python
0
@@ -833,21 +833,19 @@ package( - dir - ):%0A @@ -1246,16 +1246,62 @@ tocol.', +%0A long_description = open('README').read(), %0A%0A au
19b6d71e17f616bed3566d5615b5938bbfe3a497
Add setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,337 @@ +#!/usr/bin/env python%0A%0Afrom distutils.core import setup%0A%0Asetup(name='hydrus',%0A version='0.0.1',%0A description='A space-based application for W3C HYDRA Draft',%0A author='W3C HYDRA development group',%0A author_email='[email protected]',%0A url='https://github.com/HTTP-APIs/hydrus',%0A packages=%5B'flask==0.11'%5D,%0A)%0A
e2ae0798424d4aa0577e22d563646856866fbd1f
add setup.py file for pypi
setup.py
setup.py
Python
0
@@ -0,0 +1,618 @@ +import os%0Afrom setuptools import setup, find_packages%0A%0Aimport versioncheck%0A%0A%0Adef read(fname):%0A return open(os.path.join(os.path.dirname(__file__), fname)).read()%0A%0Asetup(%0A name='django-versioncheck',%0A version=versioncheck.__version__,%0A description='A small django app which tries to be annoying if your django version is outdated.',%0A long_description=read('README.md'),%0A license='MIT License',%0A author='Richard Stromer',%0A author_email='[email protected]',%0A url='https://github.com/noxan/django-versioncheck',%0A packages=find_packages(),%0A install_requires=%5B%0A 'django',%0A %5D,%0A)%0A
d43bcc978b1d79a20820ab1df73bd69d5d3c100d
Add setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,796 @@ +from setuptools import find_packages%0Afrom setuptools import setup%0A%0AVERSION = '0.0.1'%0A%0Asetup_args = dict(%0A name='BigQuery-Python',%0A description='Simple Python client for interacting with Google BigQuery.',%0A url='https://github.com/tylertreat/BigQuery-Python',%0A version=VERSION,%0A license='Apache',%0A packages=find_packages(),%0A include_package_data=True,%0A install_requires=%5B'google-api-python-client', 'pyopenssl'%5D,%0A author='Tyler Treat',%0A author_email='[email protected]',%0A classifiers=%5B%0A 'Development Status :: 4 - Beta',%0A 'Environment :: Web Environment',%0A 'Intended Audience :: Developers',%0A 'Operating System :: OS Independent',%0A 'Programming Language :: Python',%0A %5D,%0A)%0A%0Aif __name__ == '__main__':%0A setup(**setup_args)%0A%0A
840e178a85da246d8357481a8e6ea5a8d87deef7
Create setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,1198 @@ +%22%22%22%0AKonF'00'%0A~~~~~~~~%0A%0AKonFoo is a Python Package for creating byte stream mappers in a declarative%0Away with as little code as necessary to help fighting the confusion with the%0Afoo of the all too well-known memory dumps or binary data.%0A%0ASetup%0A-----%0A%0A.. code:: bash%0A $ pip install KonFoo%0A%0ALinks%0A-----%0A* %60website %3Chttp://github.com/JoeVirtual/KonFoo/%3E%60_%0A* %60documentation %3Chttp://github.com/JoeVirtual/KonFoo/master/docs/%3E%60_%0A* %60development version%0A %3Chttp://github.com/JoeVirtual/KonFoo/master%3E%60_%0A%0A%0A%22%22%22%0A%0Aimport re%0Aimport ast%0A%0Afrom setuptools import setup%0A%0A_version_re = re.compile(r'__version__%5Cs+=%5Cs+(.*)')%0A%0Awith open('konfoo/__init__.py', 'rb') as f:%0A version = str(ast.literal_eval(_version_re.search(%0A f.read().decode('utf-8')).group(1)))%0A%0Asetup(%0A name='KonFoo',%0A version=version,%0A license='BSD',%0A author='Jochen Gerhaeusser',%0A author_email='[email protected]',%0A url='http://github.com/JoeVirtual/KonFoo',%0A description='A declarative byte stream mapping engine.',%0A long_description=__doc__,%0A packages=%5B'konfoo'%5D,%0A install_requires=%5B%5D,%0A classifiers=%5B%0A 'License :: BSD License',%0A 'Programming Language :: Python :: 3',%0A %5D%0A)%0A
10ccc510deab5c97ce8a6c5ee57232c5e399986e
Add decision tree classifier attempt.
decision_tree.py
decision_tree.py
Python
0.000004
@@ -0,0 +1,553 @@ +import pandas as pd%0Afrom sklearn import tree%0A%0A# X = %5B%5B0, 1%5D, %5B1, 1%5D%5D%0A# Y = %5B0, 1%5D%0A#clf = tree.DecisionTreeClassifier()%0A#clf = clf.fit(X, Y)%0A%0Adata = pd.read_excel('/home/andre/sandbox/jhu-immuno/journal.pcbi.1003266.s001-2.XLS')%0A%0Aresp_cols = %5B 'MHC' %5D%0A%0Adata%5B'y'%5D = data.Immunogenicity.map(%7B'non-immunogenic': 0, 'immunogenic': 1 %7D)%0A%0AX = data%5Bresp_cols%5D%0AY = data.y%0Aclf = tree.DecisionTreeClassifier()%0A%0Adummy = pd.get_dummies(data.MHC)%0A%0Aclf.fit(dummy, Y)%0A%0Afrom sklearn.externals.six import StringIO%0Af = tree.export_graphviz(clf, out_file = 'decision_tree')
efe596e3f935fe31af5bcbd8ef1afbb6750be123
add a setup.py
setup.py
setup.py
Python
0.000001
@@ -0,0 +1,879 @@ +%22%22%22Set up the kd project%22%22%22%0A%0A%0Afrom setuptools import setup%0A%0A%0Aimport kd%0A%0A%0Asetup(%0A name='kd',%0A version=kd.__version__,%0A url='https://github.com/jalanb/kd',%0A license='MIT License',%0A author='J Alan Brogan',%0A author_email='[email protected]',%0A description='kd is a smarter cd',%0A platforms='any',%0A classifiers=%5B%0A 'Programming Language :: Python :: 2.7',%0A 'Development Status :: 2 - Pre-Alpha',%0A 'Natural Language :: English',%0A 'Environment :: Console',%0A 'Intended Audience :: Developers',%0A 'Intended Audience :: System Administrators',%0A 'License :: OSI Approved :: MIT License',%0A 'Operating System :: Unix',%0A 'Topic :: System :: Shells',%0A %5D,%0A test_suite='nose.collector',%0A tests_require=%5B'nose'%5D,%0A extras_require=%7B%0A 'docs': %5B'Sphinx'%5D,%0A 'testing': %5B'nose'%5D,%0A %7D%0A)%0A
f2c03f66fd09c1a86fdf88fc60b6db53d9d50a5b
Version 0.5.1
setup.py
setup.py
#!/usr/bin/python
#
# Copyright 2012 Major Hayden
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from setuptools import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()

setup(
    name='supernova',
    version='0.5.0',
    author='Major Hayden',
    author_email='[email protected]',
    description="novaclient wrapper for multiple nova environments",
    packages=['supernova'],
    url='https://github.com/rackerhacker/supernova',
    include_package_data=True,
    long_description=read('README.md'),
    entry_points={
        'console_scripts': ['supernova = supernova.supernova:bin_helper']
    }
)
Python
0.000001
@@ -785,17 +785,17 @@ on='0.5. -0 +1 ',%0A a
9220523e6bcac6b80410a099b2f2fd30d7cbb7d3
Add first draft of setup.py
setup.py
setup.py
Python
0
@@ -0,0 +1,369 @@ +from setuptools import setup%0A%0Asetup(%0A name = 'pyAPT',%0A version = '0.1.0',%0A author = 'Christoph Weinsheimer',%0A author_email = '[email protected]',%0A packages = %5B'pyAPT'%5D,%0A scripts = %5B%5D,%0A description = 'Controller module for Thorlabs motorized stages',%0A install_requires = %5B%5D,%0A)%0A