commit (string, 40 chars) | subject (string, 1–3.25k chars) | old_file (string, 4–311 chars) | new_file (string, 4–311 chars) | old_contents (string, 0–26.3k chars) | lang (string, 3 classes) | proba (float64, 0–1) | diff (string, 0–7.82k chars)
---|---|---|---|---|---|---|---
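For reference, a minimal sketch of how records with this schema might be consumed (the dataset path `your-org/commit-diffs` is a placeholder, not the real identifier; `load_dataset`, `filter`, and `select` are the standard Hugging Face `datasets` APIs):

```python
from datasets import load_dataset

# Placeholder path: substitute the actual dataset identifier.
ds = load_dataset("your-org/commit-diffs", split="train")

# Each record carries a commit hash, its subject line, the touched file
# paths, the pre-change file contents, a language label, a probability
# score, and the diff itself.
rows = ds.filter(lambda r: r["lang"] == "Python" and r["proba"] > 0.5)
for row in rows.select(range(3)):
    print(row["commit"][:8], row["subject"])
```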
4d500d9abe2da28cdd9bd95019048de445aac265
|
Add a history demo in documentation.
|
docs/source/tutorial/v5/history_demo.py
|
docs/source/tutorial/v5/history_demo.py
|
Python
| 0 |
@@ -0,0 +1,850 @@
+# coding: utf-8
from deprecated.history import deprecated
from deprecated.history import versionadded
from deprecated.history import versionchanged


@deprecated(
    reason="""
    This is deprecated, really. So you need to use another function.
    But I don\'t know which one.

    - The first,
    - The second.

    Just guess!
    """,
    version='0.3.0')
@versionchanged(
    reason='Well, I add a new feature in this function. '
           'It is very useful as you can see in the example below, so try it. '
           'This is a very very very very very long sentence.',
    version='0.2.0')
@versionadded(
    reason='Here is my new function.',
    version='0.1.0')
def successor(n):
    """
    Calculate the successor of a number.

    :param n: a number
    :return: number + 1
    """
    return n + 1


help(successor)
|
|
361333f8b214097469389d0219f339fc59ea469b
|
Add permissions.py
|
teams/permisssions.py
|
teams/permisssions.py
|
Python
| 0.000001 |
@@ -0,0 +1,287 @@
+from rest_framework.permissions import BasePermission

class IsOwnerPermission(BasePermission):
    def has_permission(self, request, view):
        return request.user.is_authenticated()

    def has_object_permission(self, request, view, obj):
        return request.user == obj.owner
|
|
7cb839279bc62b95eb7367814ef71c046d4b2184
|
Add 'examples' module which contains some examplary function examples.
|
tssim/examples.py
|
tssim/examples.py
|
Python
| 0 |
@@ -0,0 +1,725 @@
+"""This module contains example time functions"""

import numpy as np


def rand_lin_noise():
    beta = np.random.normal()
    return lambda x: beta * x + np.random.random(size=len(x))


def const_lin_noise(x):
    beta = np.random.normal()
    return beta * x + np.random.random(size=len(x))


def random_walk(x):
    return np.cumsum(np.random.normal(size=x.shape[0]))


def random_walk_limit(limit=2):
    vals = {"current": 0}

    def walk(value):
        new_val = np.random.normal()

        if vals["current"] >= limit:
            new_val = -abs(new_val)
        elif vals["current"] <= -limit:
            new_val = abs(new_val)

        vals["current"] += new_val
        return vals["current"]

    return walk
|
|
c156ad1379d842924b928c6c80f668f9875e840a
|
Remove page-filter flag. (which is now user-filter)
|
tools/telemetry/telemetry/story/story_filter.py
|
tools/telemetry/telemetry/story/story_filter.py
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import optparse
import re

from telemetry.internal.util import command_line


class _StoryMatcher(object):
  def __init__(self, pattern):
    self._regex = None
    self.has_compile_error = False
    if pattern:
      try:
        self._regex = re.compile(pattern)
      except re.error:
        self.has_compile_error = True

  def __nonzero__(self):
    return self._regex is not None

  def HasMatch(self, story):
    return self and bool(
        self._regex.search(story.display_name) or
        (story.name and self._regex.search(story.name)))


class _StoryLabelMatcher(object):
  def __init__(self, labels_str):
    self._labels = labels_str.split(',') if labels_str else None

  def __nonzero__(self):
    return self._labels is not None

  def HasLabelIn(self, story):
    return self and bool(story.labels.intersection(self._labels))


class StoryFilter(command_line.ArgumentHandlerMixIn):
  """Filters stories in the story set based on command-line flags."""

  @classmethod
  def AddCommandLineArgs(cls, parser):
    group = optparse.OptionGroup(parser, 'User story filtering options')
    group.add_option('--story-filter',
        help='Use only stories whose names match the given filter regexp.')
    group.add_option('--page-filter', dest='story_filter',
        help='Deprecated. Use --story-filter instead.')
    group.add_option('--story-filter-exclude',
        help='Exclude stories whose names match the given filter regexp.')
    group.add_option('--story-label-filter',
        help='Use only stories that have any of these labels')
    group.add_option('--story-label-filter-exclude',
        help='Exclude stories that have any of these labels')
    parser.add_option_group(group)

  @classmethod
  def ProcessCommandLineArgs(cls, parser, args):
    cls._include_regex = _StoryMatcher(args.story_filter)
    cls._exclude_regex = _StoryMatcher(args.story_filter_exclude)
    cls._include_labels = _StoryLabelMatcher(args.story_label_filter)
    cls._exclude_labels = _StoryLabelMatcher(args.story_label_filter_exclude)
    if cls._include_regex.has_compile_error:
      raise parser.error('--story-filter: Invalid regex.')
    if cls._exclude_regex.has_compile_error:
      raise parser.error('--story-filter-exclude: Invalid regex.')

  @classmethod
  def IsSelected(cls, story):
    # Exclude filters take priority.
    if cls._exclude_labels.HasLabelIn(story):
      return False
    if cls._exclude_regex.HasMatch(story):
      return False
    if cls._include_labels and not cls._include_labels.HasLabelIn(story):
      return False
    if cls._include_regex and not cls._include_regex.HasMatch(story):
      return False
    return True
|
Python
| 0 |
@@ -1374,123 +1374,8 @@
 .')
-    group.add_option('--page-filter', dest='story_filter',
        help='Deprecated. Use --story-filter instead.')
|
c39c086f51963678769c1066637ca573c721e827
|
Create a simple static gallery script.
|
static_gallery.py
|
static_gallery.py
|
Python
| 0 |
@@ -0,0 +1,1929 @@
+from . import flag
#from go import html
from go import os
from go import path/filepath

def ReadAlbumDirs(input_dir):
  f = os.Open(input_dir)
  with defer f.Close():
    names = f.Readdirnames(-1)
    for name in names:
      stat = os.Stat(filepath.Join(input_dir, name))
      if stat.IsDir():
        yield name

def RenderDir(album_names, output_dir):
  index = filepath.Join(output_dir, 'index.html')
  f = os.Create(index)
  with defer f.Close():
    f.Write('<html><body><h3>Gallery %s</h3> <ul>\n' % output_dir)
    for name in album_names:
      f.Write('<li><a href="%s">%q</a></li>\n' % (name, name))

def ReadPhotosInDir(input_dir):
  f = os.Open(input_dir)
  with defer f.Close():
    names = f.Readdirnames(-1)
    for name in names:
      stat = os.Stat(filepath.Join(input_dir, name))
      if stat.IsDir() == False:
        yield name

def RenderAlbum(photo_names, output_dir):
  index = filepath.Join(output_dir, 'index.html')
  f = os.Create(index)
  with defer f.Close():
    f.Write('<html><body><h3>Album %s</h3> <ul>\n' % output_dir)
    for name in photo_names:
      f.Write('<li><a href="%s"><img src="%s" /></a></li>\n' % (name, name))

def LinkPhotos(photo_names, input_dir, output_dir):
  for photo in photo_names:
    photo_orig = filepath.Join(input_dir, photo)
    photo_dest = filepath.Join(output_dir, photo)
    os.Link(photo_orig, photo_dest)


input_dir = flag.String('input', '', 'The input directory.')
output_dir = flag.String('output', '', 'The output directory.')

def main(argv):
  argv = flag.Munch(argv)

  album_dirs = list(ReadAlbumDirs(input_dir.X))
  RenderDir(album_dirs, output_dir.X)
  for dir in album_dirs:
    photo_dir = filepath.Join(input_dir.X, dir)
    output_dir = filepath.Join(output_dir.X, dir)
    photos = list(ReadPhotosInDir(photo_dir))
    os.MkdirAll(output_dir, os.ModePerm)
    RenderAlbum(photos, output_dir)
    LinkPhotos(photos, photo_dir, output_dir)
|
|
f083789e5615d15715f49a7dbdb25505aa5efae2
|
Initialize P1_assignChores
|
books/AutomateTheBoringStuffWithPython/Chapter16/PracticeProjects/P1_assignChores.py
|
books/AutomateTheBoringStuffWithPython/Chapter16/PracticeProjects/P1_assignChores.py
|
Python
| 0.000124 |
@@ -0,0 +1,471 @@
+# Write a program that takes a list of people’s email addresses and a list of chores
# that need to be done and randomly assigns chores to people. Email each person their
# assigned chores.
#
# If you’re feeling ambitious, keep a record of each person’s previously assigned
# chores so that you can make sure the program avoids assigning anyone the same chore
# they did last time.
#
# For another possible feature, schedule the program to run once a week automatically.
|
|
54a9b637aad85a20f3e865185ffed0abfd4192cd
|
Create tutorial4.py
|
tutorial4.py
|
tutorial4.py
|
Python
| 0 |
@@ -0,0 +1,813 @@
+from ggame import App, RectangleAsset, ImageAsset, Sprite, LineStyle, Color, Frame

SCREEN_WIDTH = 640
SCREEN_HEIGHT = 480

class SpaceShip(Sprite):
    """
    Animated space ship
    """
    asset = ImageAsset("images/four_spaceship_by_albertov_with_thrust.png",
        Frame(227,0,292-227,125), 4, 'vertical')

    def __init__(self, position):
        super().__init__(SpaceShip.asset, position)

class SpaceGame(App):
    """
    Tutorial4 space game example.
    """
    def __init__(self, width, height):
        super().__init__(width, height)
        # Background
        black = Color(0, 1)
        noline = LineStyle(0, black)
        bg_asset = RectangleAsset(SCREEN_WIDTH, SCREEN_HEIGHT, noline, black)
        bg = Sprite(bg_asset, (0,0))

myapp = SpaceGame(SCREEN_WIDTH, SCREEN_HEIGHT)
myapp.run()
|
|
1a9302d984e8fd0e467a04c87428b64d874e5f04
|
refactor customerWallet
|
usermanage/views/customerWallet.py
|
usermanage/views/customerWallet.py
|
Python
| 0.999998 |
@@ -0,0 +1,829 @@
+from django.shortcuts import render, redirect
from django.http import HttpResponseRedirect
from django.contrib.auth import login, authenticate, logout
from django.contrib.auth.models import User, Group
from django.contrib.auth.decorators import login_required, user_passes_test, permission_required
from django.contrib.auth.forms import UserCreationForm
from customermanage.models import Coupon, Wallet
from storemanage.models import Ticket
# Create your views here.
from usermanage import models

@login_required()
@permission_required('usermanage.customer_rights',raise_exception=True)
def customerWallet(request):
    user = request.user
    wallets = [{'name':w.currency.name,'amount':w.amount} for w in Wallet.objects.filter(user=user)]
    print(wallets)
    return render(request, 'index/wallet.html',{'wallets':wallets})
|
|
b9f28570ba619db5adacb05a7eadab77f140e876
|
Create __init__.py
|
fake_data_crud_service/rest/__init__.py
|
fake_data_crud_service/rest/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1,114 @@
+__package__ = 'rest'
__author__ = 'Barbaglia, Guido'
__email__ = '[email protected];'
__license__ = 'MIT'
|
|
192e60955051f8ffb34f6cc1f1e3f226acb1b5fb
|
add missing primary key constraints (#7129)
|
warehouse/migrations/versions/b5bb5d08543d_create_missing_primary_key_constraints.py
|
warehouse/migrations/versions/b5bb5d08543d_create_missing_primary_key_constraints.py
|
Python
| 0 |
@@ -0,0 +1,1021 @@
+# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
create missing primary key constraints

Revision ID: b5bb5d08543d
Revises: 08aedc089eaf
Create Date: 2019-12-19 14:27:47.230249
"""

from alembic import op

revision = "b5bb5d08543d"
down_revision = "08aedc089eaf"


def upgrade():
    op.create_primary_key(None, "release_files", ["id"])
    op.create_primary_key(None, "release_dependencies", ["id"])
    op.create_primary_key(None, "roles", ["id"])


def downgrade():
    raise RuntimeError("Order No. 227 - Ни шагу назад!")
|
|
a3d3040f16a604b534406d2f59a841d7ef6cebfa
|
Test HTTPMediaWikiAPI.get_content()
|
tests/test_api.py
|
tests/test_api.py
|
Python
| 0.000001 |
@@ -0,0 +1,421 @@
+import requests

from unittest import TestCase
from mfnf.api import HTTPMediaWikiAPI

class TestHTTPMediaWikiAPI(TestCase):

    def setUp(self):
        self.api = HTTPMediaWikiAPI(requests.Session())

    def test_get_content(self):
        content = self.api.get_content("Mathe für Nicht-Freaks: Epsilon-Delta-Kriterium der Stetigkeit")

        self.assertTrue(content.startswith("{{#invoke:Mathe für Nicht-Freaks"))
|
|
8139dc9e04025da001323122521951f5ed2c391b
|
Fix mysql encoding for users.profile.reason
|
users/migrations/0010_users-profile-encoding.py
|
users/migrations/0010_users-profile-encoding.py
|
Python
| 0.001314 |
@@ -0,0 +1,643 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.9.9 on 2016-09-25 01:43
from __future__ import unicode_literals

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('users', '0009_remove_profile_active'),
    ]

    operations = [
        migrations.RunSQL("ALTER DATABASE default CHARACTER SET = utf8mb4 COLLATE = utf8mb4_unicode_ci;"),
        migrations.RunSQL("ALTER TABLE users_profile CONVERT TO CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
        migrations.RunSQL("ALTER TABLE users_profile MODIFY reason LONGTEXT CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;"),
    ]
|
|
328901c74d1ee103a1ee5b2f26aa391ddeda465b
|
Add unit test for webpage creation and description
|
tests/test_web.py
|
tests/test_web.py
|
Python
| 0 |
@@ -0,0 +1,1806 @@
+"""Test the AutoCMS web reporting functionality."""

import os
import shutil
import unittest
import re

from autocms.core import load_configuration
from autocms.web import (
    produce_default_webpage
)


class TestWebPageCreation(unittest.TestCase):
    """Test the accurate creation of test webpages."""

    def setUp(self):
        self.config = load_configuration('autocms.cfg.example')
        self.config['AUTOCMS_WEBDIR'] = self.config['AUTOCMS_BASEDIR']
        # call the scratch directory 'uscratch' instead of 'scratch'
        # so that in pathological cases one does not resolve to
        # /scratch which is often used.
        self.testdir = os.path.join(self.config['AUTOCMS_BASEDIR'],
                                    'uscratch')
        os.makedirs(self.testdir)
        self.page_description = 'AutoCMS Web Unit Test Description'
        description_file = os.path.join(self.testdir, 'description.html')
        with open(description_file, 'w') as description_filehandle:
            description_filehandle.write(self.page_description)

    def tearDown(self):
        shutil.rmtree(os.path.join(self.config['AUTOCMS_BASEDIR'],
                                   'uscratch'))

    def test_create_webpage_with_description(self):
        """Test that a default webpage is created with description."""
        records = []
        produce_default_webpage(records, 'uscratch', self.config)
        webpage_path = os.path.join(self.config['AUTOCMS_WEBDIR'],
                                    'uscratch/index.html')
        self.assertTrue(os.path.isfile(webpage_path))
        with open(webpage_path) as webpage:
            webpage_contents = webpage.read()
        self.assertTrue(re.search(self.page_description, webpage_contents))


if __name__ == '__main__':
    unittest.main()
|
|
e6b086f3baef34cf1e5278e930a034a92f4eee76
|
Add test for DirectionalGridCRF
|
tests/test_directional_crf.py
|
tests/test_directional_crf.py
|
Python
| 0 |
@@ -0,0 +1,1860 @@
+import numpy as np

from numpy.testing import assert_array_equal, assert_array_almost_equal
#from nose.tools import assert_almost_equal

import pystruct.toy_datasets as toy
from pystruct.lp_new import lp_general_graph
from pystruct.inference_methods import _make_grid_edges
from pystruct.crf import DirectionalGridCRF


def test_inference():
    # Test inference with different weights in different directions

    X, Y = toy.generate_blocks_multinomial(noise=2, n_samples=1, seed=1)
    x, y = X[0], Y[0]
    n_states = x.shape[-1]
    edges = _make_grid_edges(x, neighborhood=4)

    edge_list = _make_grid_edges(x, 4, return_lists=True)
    edges = np.vstack(edge_list)

    pw_horz = -1 * np.eye(n_states)
    xx, yy = np.indices(pw_horz.shape)
    # linear ordering constraint horizontally
    pw_horz[xx > yy] = 1

    # high cost for unequal labels vertically
    pw_vert = -1 * np.eye(n_states)
    pw_vert[xx != yy] = 1
    pw_vert *= 10

    # generate edge weights
    edge_weights_horizontal = np.repeat(pw_horz[np.newaxis, :, :],
                                        edge_list[0].shape[0], axis=0)
    edge_weights_vertical = np.repeat(pw_vert[np.newaxis, :, :],
                                      edge_list[1].shape[0], axis=0)
    edge_weights = np.vstack([edge_weights_horizontal, edge_weights_vertical])

    # do inference
    res = lp_general_graph(-x.reshape(-1, n_states), edges, edge_weights,
                           exact=False)

    # sam inference through CRF inferface
    crf = DirectionalGridCRF(n_states=3, inference_method='lp')
    w = np.hstack([np.ones(3), -pw_horz.ravel(), -pw_vert.ravel()])
    y_pred = crf.inference(x, w, relaxed=True)
    assert_array_almost_equal(res[0], y_pred[0].reshape(-1, n_states))
    assert_array_almost_equal(res[1], y_pred[1])
    assert_array_equal(y, np.argmax(y_pred[0], axis=-1))
|
|
439e4b740f6903341e81e158e6591c9cbd242a4c
|
Check in a tool that dumps graphviz output.
|
tools/graphviz.py
|
tools/graphviz.py
|
Python
| 0 |
@@ -0,0 +1,2833 @@
+#!/usr/bin/python

# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Using the JSON dumped by the dump-dependency-json generator,
generate input suitable for graphviz to render a dependency graph of
targets."""

import collections
import json
import sys


def ParseTarget(target):
  target, _, suffix = target.partition('#')
  filename, _, target = target.partition(':')
  return filename, target, suffix


def LoadEdges(filename, targets):
  """Load the edges map from the dump file, and filter it to only
  show targets in |targets| and their depedendents."""

  file = open('dump.json')
  edges = json.load(file)
  file.close()

  # Copy out only the edges we're interested in from the full edge list.
  target_edges = {}
  to_visit = targets[:]
  while to_visit:
    src = to_visit.pop()
    if src in target_edges:
      continue
    target_edges[src] = edges[src]
    to_visit.extend(edges[src])

  return target_edges


def WriteGraph(edges):
  """Print a graphviz graph to stdout.
  |edges| is a map of target to a list of other targets it depends on."""

  # Bucket targets by file.
  files = collections.defaultdict(list)
  for src, dst in edges.items():
    build_file, target_name, toolset = ParseTarget(src)
    files[build_file].append(src)

  print 'digraph D {'
  print '  fontsize=8'  # Used by subgraphs.
  print '  node [fontsize=8]'

  # Output nodes by file. We must first write out each node within
  # its file grouping before writing out any edges that may refer
  # to those nodes.
  for filename, targets in files.items():
    if len(targets) == 1:
      # If there's only one node for this file, simplify
      # the display by making it a box without an internal node.
      target = targets[0]
      build_file, target_name, toolset = ParseTarget(target)
      print '  "%s" [shape=box, label="%s\\n%s"]' % (target, filename,
                                                     target_name)
    else:
      # Group multiple nodes together in a subgraph.
      print '  subgraph "cluster_%s" {' % filename
      print '    label = "%s"' % filename
      for target in targets:
        build_file, target_name, toolset = ParseTarget(target)
        print '    "%s" [label="%s"]' % (target, target_name)
      print '  }'

  # Now that we've placed all the nodes within subgraphs, output all
  # the edges between nodes.
  for src, dsts in edges.items():
    for dst in dsts:
      print '  "%s" -> "%s"' % (src, dst)

  print '}'


if __name__ == '__main__':
  if len(sys.argv) < 2:
    print >>sys.stderr, __doc__
    print >>sys.stderr
    print >>sys.stderr, 'usage: %s target1 target2...' % (sys.argv[0])
    sys.exit(1)

  edges = LoadEdges('dump.json', sys.argv[1:])

  WriteGraph(edges)
|
|
80d9a407d76f11573af5ccb6783f837b939b5466
|
Add Python benchmark
|
lib/node_modules/@stdlib/math/base/special/erfinv/benchmark/python/benchmark.scipy.py
|
lib/node_modules/@stdlib/math/base/special/erfinv/benchmark/python/benchmark.scipy.py
|
Python
| 0.000138 |
@@ -0,0 +1,1543 @@
+#!/usr/bin/env python
"""Benchmark scipy.special.erfinv."""

import timeit

name = "erfinv"
repeats = 3
iterations = 1000000


def print_version():
    """Print the TAP version."""

    print("TAP version 13")


def print_summary(total, passing):
    """Print the benchmark summary.

    # Arguments

    * `total`: total number of tests
    * `passing`: number of passing tests

    """

    print("#")
    print("1.." + str(total))  # TAP plan
    print("# total " + str(total))
    print("# pass " + str(passing))
    print("#")
    print("# ok")


def print_results(elapsed):
    """Print benchmark results.

    # Arguments

    * `elapsed`: elapsed time (in seconds)

    # Examples

    ``` python
    python> print_results(0.131009101868)
    ```
    """

    rate = iterations / elapsed

    print("  ---")
    print("  iterations: " + str(iterations))
    print("  elapsed: " + str(elapsed))
    print("  rate: " + str(rate))
    print("  ...")


def benchmark():
    """Run the benchmark and print benchmark results."""

    setup = "from scipy.special import erfinv; from random import random;"
    stmt = "y = erfinv(2.0*random() - 1.0)"

    t = timeit.Timer(stmt, setup=setup)

    print_version()

    for i in xrange(3):
        print("# python::" + name)
        elapsed = t.timeit(number=iterations)
        print_results(elapsed)
        print("ok " + str(i+1) + " benchmark finished")

    print_summary(repeats, repeats)


def main():
    """Run the benchmark."""
    benchmark()


if __name__ == "__main__":
    main()
|
|
3133bbfcb5ee56c88ea20be21778519bffe77299
|
Add another different type of book
|
literotica.py
|
literotica.py
|
Python
| 0.999663 |
@@ -0,0 +1,924 @@
+from common import *
from sys import argv
from urlgrab import Cache
from re import compile, DOTALL, MULTILINE

cache = Cache()
url = argv[1]

titlePattern = compile("<h1>([^<]+)</h1>")
contentPattern = compile("<div class=\"b-story-body-x x-r15\">(.+?)</div><div class=\"b-story-stats-block\">", DOTALL|MULTILINE)
nextPattern = compile("\"([^\"]+)\">Next</a>")

page = cache.get(url, max_age = -1)
data = page.read()
open("dump", "wb").write(data)

title = titlePattern.findall(data)
print title
title = title[0]
content = u""
while True:
	contentMatch = contentPattern.findall(data)
	print page.headers.headers
	print type(data)
	content += contentMatch[0]
	#print content
	nextMatch = nextPattern.findall(data)
	if nextMatch == []:
		break

	nextURL = nextMatch[0]
	print nextURL
	page = cache.get(nextURL, max_age=-1)
	data = page.read()


toc = tocStart(title)
generatePage(url, title, content, title, toc)
tocEnd(toc)
|
|
f31d6730a0cfbc50c55e9260391f399e77c3d631
|
access the repository from console
|
utils/__init__.py
|
utils/__init__.py
|
Python
| 0.000001 |
@@ -0,0 +1,18 @@
+__version__="0.1"
|
|
893679baff0367538bdf3b52b04f8bae72732be8
|
Add migration to remove system avatar source.
|
zerver/migrations/0031_remove_system_avatar_source.py
|
zerver/migrations/0031_remove_system_avatar_source.py
|
Python
| 0 |
@@ -0,0 +1,487 @@
+# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('zerver', '0030_realm_org_type'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='avatar_source',
            field=models.CharField(choices=[('G', 'Hosted by Gravatar'), ('U', 'Uploaded by user')], max_length=1, default='G'),
        ),
    ]
|
|
ff89cda5f77bec569c7451c9ee72ef7c028f7552
|
Add sample extraction script
|
extract_samples.py
|
extract_samples.py
|
Python
| 0 |
@@ -0,0 +1,166 @@
+import sys, os
import numpy as np
import pandas as pd
import datetime

if __name__ == '__main__':
    infile = sys.argv[1]
    csv_content = pd.read_csv(infile, [0])
|
|
73f47cc6a8a98b2026ee27985f8c3042352c941b
|
Add lc066_plus_one.py
|
lc066_plus_one.py
|
lc066_plus_one.py
|
Python
| 0.000849 |
@@ -0,0 +1,819 @@
+"""Leetcode 66. Plus One
Easy

URL: https://leetcode.com/problems/plus-one/

Given a non-empty array of digits representing a non-negative integer,
plus one to the integer.

The digits are stored such that the most significant digit is at the
head of the list, and each element in the array contain a single digit.

You may assume the integer does not contain any leading zero,
except the number 0 itself.

Example 1:
Input: [1,2,3]
Output: [1,2,4]
Explanation: The array represents the integer 123.

Example 2:
Input: [4,3,2,1]
Output: [4,3,2,2]
Explanation: The array represents the integer 4321.
"""

class Solution(object):
    def plusOne(self, digits):
        """
        :type digits: List[int]
        :rtype: List[int]
        """
        pass


def main():
    pass


if __name__ == '__main__':
    main()
|
|
efe8b8f34919425d36fc36eee8ca719c49f4f3b5
|
fix data types for default values
|
lib/governance.py
|
lib/governance.py
|
#!/usr/bin/env python

import pdb
import time
import argparse
import sys
import json

sys.path.append("../")
sys.path.append("../scripts")

import misc
import binascii

# PeeWee models -- to replace hand-coded versions
from models import PeeWeeEvent, PeeWeeSuperblock, PeeWeeProposal, PeeWeeGovernanceObject

from pprint import pprint


class GovernanceObject:
    # object data for specific classes
    def __init__(self):
        self.subclasses = []  # object based subclasses
        # mysql record data
        # self.governance_object = {}
        self.governance_object = PeeWeeGovernanceObject()

    @classmethod
    def root(self):
        root_object_dict = {
            "object_name" : "root",
            "object_type" : "0",
            "object_creation_time" : 0,
            "object_data" : binascii.hexlify(json.dumps([]))
        }
        root = GovernanceObject()
        root.init(**root_object_dict)
        return root

    def init(self, **kwargs):
        new_gobj_dict = {
            "parent_id" : 0,
            "object_hash" : 0,
            "object_parent_hash" : 0,
            "object_creation_time" : misc.get_epoch(),
            "object_name" : "",
            "object_type" : "",
            "object_revision" : 1,
            "object_fee_tx" : "",
            "object_data" : ""
        }
        if kwargs:
            for key, value in kwargs.iteritems():
                new_gobj_dict[ key ] = value
        self.governance_object = PeeWeeGovernanceObject(**new_gobj_dict)
        return self

    """
    Subclasses:
    - Governance objects can be converted into many subclasses by using the data field.
    - See subclasses.py for more information
    """

    def compile_subclasses(self):
        objects = []
        for (obj_type, obj) in self.subclasses:
            objects.append((obj_type, obj.get_dict()))
        self.governance_object.object_data = binascii.hexlify(json.dumps(objects, sort_keys = True))
        return True

    def save_subclasses(self):
        objects = []
        for (obj_type, obj) in self.subclasses:
            obj.save()
        return True

    def load_subclasses(self):
        the_json = binascii.unhexlify(self.governance_object.object_data)
        objects = json.loads( the_json )
        ## todo -- make plugin system for subclasses?
        for (obj_type, obj_data) in objects:
            if obj_type == "proposal":
                obj = PeeWeeProposal(**obj_data)
            if obj_type == "trigger":
                obj = PeeWeeSuperblock(**obj_data)
            self.subclasses.append((obj_type, obj))
        return True

    """
    load/save/update from database
    """

    def load(self, record_id):
        self.init()
        gobj = PeeWeeGovernanceObject.get(PeeWeeGovernanceObject.id == record_id)
        if gobj:
            self.governance_object = gobj
            print "loaded govobj successfully: ", self.governance_object.id
            self.load_subclasses()
        else:
            print "object not found"
            print

    def update_field(self, field, value):
        self.governance_object.__setattr__(field, value)

    def get_field(self, field):
        return self.governance_object.__getattr__(field)

    def save(self):
        self.compile_subclasses()
        # pdb.set_trace()
        self.governance_object.save()
        self.save_subclasses()
        return self.governance_object.id

    # === governance commands

    def get_prepare_command(self):
        # cmd = "gobject prepare %(object_parent_hash)s %(object_revision)s %(object_creation_time)s %(object_name)s %(object_data)s" % self.governance_object
        cmd = "gobject prepare %s %s %s %s %s" % (
            self.governance_object.object_parent_hash,
            self.governance_object.object_revision,
            self.governance_object.object_creation_time,
            self.governance_object.object_name,
            self.governance_object.object_data
        )
        return cmd

    def get_submit_command(self):
        cmd = "gobject submit %(object_fee_tx)s %(object_parent_hash)s %(object_revision)s %(object_creation_time)s %(object_name)s %(object_data)s" % self.governance_object
        return cmd

    def add_subclass(self, typename, obj):
        self.subclasses.append((typename,obj))

    def is_valid(self):
        """
        - check tree position validity
        - check signatures of owners
        - check validity of revision (must be n+1)
        - check validity of field data (address format, etc)
        """
        return True
|
Python
| 0.000001 |
@@ -728,11 +728,9 @@
 " :
-"0"
+0
 ,
@@ -1197,26 +1197,25 @@
 ect_type" :
-""
+0
 ,
|
b51398d602a157ce55fd7e08eedd953051f716a1
|
Add script to update uploaded files.
|
backend/scripts/updatedf.py
|
backend/scripts/updatedf.py
|
Python
| 0 |
@@ -0,0 +1,214 @@
+#!/usr/bin/env python

#import hashlib
import os

def main():
    for root, dirs, files in os.walk("/mcfs/data/materialscommons"):
        for f in files:
            print f

if __name__ == "__main__":
    main()
|
|
ba3582d1e4521c040ef9f43c3a4760eb4fd694da
|
add lib/config_loader.py
|
hokusai/lib/config_loader.py
|
hokusai/lib/config_loader.py
|
Python
| 0.000002 |
@@ -0,0 +1,945 @@
+import os
import tempfile
import shutil

from urlparse import urlparse

import boto3
import yaml

from hokusai.lib.common import get_region_name
from hokusai.lib.exceptions import HokusaiError

class ConfigLoader
    def __init__(self, uri):
        self.uri = uri

    def load(self):
        uri = urlparse(self.uri)
        if not uri.path.endswith('yaml') or not uri.path.endswith('yml'):
            raise HokusaiError('Uri must be of Yaml file type')

        tmpdir = tempfile.mkdtemp()

        switch(uri.scheme):
            case 's3':
                client = boto3.client('s3', region_name=get_region_name())
                tmp_configfile = os.path.join(tmpdir, 'config')
                client.download_file(uri.netloc, uri.path.lstrip('/'), tmp_configfile)

            default:
                tmp_configfile = uri.path

        with open(tmp_configfile, 'r') as f:
            struct = yaml.safe_load(f.read())
            if type(struct) is not obj:
                raise HokusaiError('Yaml is invalid')

        return struct
|
|
21e766688e3cc4d08339f81c35dba43d26010a6d
|
edit vehicle form
|
vehicles/forms.py
|
vehicles/forms.py
|
Python
| 0 |
@@ -0,0 +1,386 @@
+from django import forms


class EditVehicleForm(forms.Form):
    fleet_number = forms.CharField(label='Fleet number', required=False)
    reg = forms.CharField(label='Registration', required=False)
    vehicle_type = forms.CharField(label='Type', required=False)
    colours = forms.CharField(label='Colours', required=False)
    notes = forms.CharField(label='Notes', required=False)
|
|
fbf36a2fb52b5ed1aceaec4c1d1075448584a97d
|
Test that modules can be imported in any order
|
tests/test_imports.py
|
tests/test_imports.py
|
Python
| 0 |
@@ -0,0 +1,622 @@
+"""Test that all modules/packages in the lektor tree are importable in any order

Here we import each module by itself, one at a time, each in a new
python interpreter.

"""
import pkgutil
import sys
from subprocess import run

import pytest

import lektor


def iter_lektor_modules():
    for module in pkgutil.walk_packages(lektor.__path__, f"{lektor.__name__}."):
        yield module.name


@pytest.fixture(params=iter_lektor_modules())
def module(request):
    return request.param


def test_import(module):
    python = sys.executable
    assert run([python, "-c", f"import {module}"], check=False).returncode == 0
|
|
e3bdccc8c7ef23b449a53043f4a048fe71cd642c
|
Use an explicit list due to the filter-object type of python3
|
accounting/apps/connect/views.py
|
accounting/apps/connect/views.py
|
from django.views import generic
from django.http import HttpResponseRedirect
from django.core.urlresolvers import reverse

from accounting.apps.books.models import Organization

from .steps import (
    CreateOrganizationStep,
    ConfigureTaxRatesStep,
    ConfigureBusinessSettingsStep,
    ConfigureFinancialSettingsStep,
    AddEmployeesStep,
    ConfigurePayRunSettingsStep,
    AddFirstClientStep,
    AddFirstInvoiceStep)


class RootRedirectionView(generic.View):
    """
    Redirect to the books if an organization is already configured
    Otherwise we begin the step by step creation process to help the user
    begin and configure his books
    """

    def get(self, *args, **kwargs):
        if Organization.objects.all().count():
            return HttpResponseRedirect(reverse('books:dashboard'))


class GettingStartedView(generic.TemplateView):
    template_name = "connect/getting_started.html"

    def get_steps(self, request):
        user = request.user
        steps = steps = [
            CreateOrganizationStep(user),
            ConfigureTaxRatesStep(user),
            ConfigureBusinessSettingsStep(user),
            ConfigureFinancialSettingsStep(user),
            AddEmployeesStep(user),
            ConfigurePayRunSettingsStep(user),
            AddFirstClientStep(user),
            AddFirstInvoiceStep(user),
        ]
        return steps

    def get_context_data(self, **kwargs):
        ctx = super().get_context_data(**kwargs)
        request = self.request
        steps = self.get_steps(self.request)
        uncompleted_steps = filter(lambda s: not s.completed(request), steps)
        try:
            next_step = next(uncompleted_steps)
        except StopIteration:
            next_step = None
        ctx['steps'] = steps
        ctx['next_step'] = next_step
        ctx['all_steps_completed'] = bool(next_step is None)
        return ctx

    def post(self, request, *args, **kwargs):
        steps = self.get_steps(request)
        uncompleted_steps = filter(lambda s: not s.completed(request), steps)
        if not len(uncompleted_steps):
            return super().post(request, *args, **kwargs)

        # unmark the session as getting started
        request.sessions['getting_started_done'] = True
        return HttpResponseRedirect(reverse('books:dashboard'))
|
Python
| 0.000005 |
@@ -1559,33 +1559,26 @@
 complete
-d_steps =
+_
 filter
-(
+ =
 lambda s
@@ -1599,32 +1599,90 @@
 ted(request)
+
        uncompleted_steps = list(filter(uncomplete_filter
 , steps)

    try
@@ -1661,32 +1661,33 @@
 e_filter, steps)
+)

    try:

@@ -1713,16 +1713,27 @@
 = next(
+s for s in
 uncomple
|
b7fff47b228fbe8774c9f465c383ae1015c598fe
|
use cvmfs.py for openRootCatalog.py
|
add-ons/tools/openRootCatalog.py
|
add-ons/tools/openRootCatalog.py
|
#!/usr/bin/python

import sys
import zlib
import tempfile
import subprocess

def getRootCatalogName(cvmfspublished):
	try:
		cvmfspubdata = open(cvmfspublished, 'rb').read()
	except:
		print "cannot open .cvmfspublished"
		sys.exit(1)

	lines = cvmfspubdata.split('\n')
	if len(lines) < 1:
		print ".cvmfspublished is malformed"
		sys.exit(1)

	return lines[0][1:]

def decompressCatalog(filename, destination):
	str_object1 = open(filename, 'rb').read()
	str_object2 = zlib.decompress(str_object1)
	f = open(destination, 'wb')
	f.write(str_object2)
	f.close()

def openCatalog(filename):
	subprocess.call(['sqlite3', filename])

def usage():
	print sys.argv[0] + " <repository name>"
	print "This script decompresses the root catalog file to a temporary storage"
	print "and opens this directly with sqlite3."
	print "WARNING: changes to this database will not persist, as it is only a temp"

def main():
	if len(sys.argv) != 2:
		usage()
		sys.exit(1)

	repoDir = "/srv/cvmfs/" + sys.argv[1] + "/";
	rootCatalog = getRootCatalogName(repoDir + ".cvmfspublished")

	myTmpFile = tempfile.NamedTemporaryFile('wb')
	decompressCatalog(repoDir + "data/" + rootCatalog[:2] + "/" + rootCatalog[2:] + "C", myTmpFile.name)
	openCatalog(myTmpFile.name)
	myTmpFile.close()

main()
|
Python
| 0 |
@@ -23,650 +23,82 @@
 ort
-sys
import zlib
import tempfile
import subprocess

def getRootCatalogName(cvmfspublished):
	try:
		cvmfspubdata = open(cvmfspublished, 'rb').read()
	except
+cvmfs

import sys

def usage()
 :
-	
 	print
-"cannot open .cvmfspublished"
		sys.exit(1)
	
	lines = cvmfspubdata.split('\n')
	if len(lines) < 1:
		print ".cvmfspublished is malformed"
		sys.exit(1)

	return lines[0][1:]

def decompressCatalog(filename, destination):
	str_object1 = open(filename, 'rb').read()
	str_object2 = zlib.decompress(str_object1)
	f = open(destination, 'wb')
	f.write(str_object2)
	f.close()

def openCatalog(filename):
	subprocess.call(['sqlite3', filename])

def usage():
	print sys.argv[0] + " <
+sys.argv[0] + " <repository path |
 repo
@@ -104,20 +104,19 @@
 ository
-name
+url
 >"
	prin
@@ -383,322 +383,122 @@
 (1)
-	
 
	repo
-Dir = "/srv/cvmfs/" + sys.argv[1] + "/";
	rootCatalog = getRootCatalogName(repoDir + ".cvmfspublished")
	
	myTmpFile = tempfile.NamedTemporaryFile('wb')
	decompressCatalog(repoDir + "data/" + rootCatalog[:2] + "/" + rootCatalog[2:] + "C", myTmpFile.name)
	openCatalog(myTmpFile.name)
	myTmpFile.clos
+ = cvmfs.OpenRepository(sys.argv[1])
	root_clg = repo.RetrieveRootCatalog()
	root_clg.OpenInteractiv
 e()
-	
 
main()
+
|
a377195fa95b819924ddfbd3fb564cffbe08f9ae
|
Add an example for solvent model to customize solvent cavity
|
examples/solvent/30-custom_solvent_cavity.py
|
examples/solvent/30-custom_solvent_cavity.py
|
Python
| 0 |
@@ -0,0 +1,1924 @@
+#!/usr/bin/env python

'''
Custom solvent cavity
'''

import numpy
from pyscf import gto, qmmm, solvent

#
# Case 1. Cavity for dummy atoms with basis on the dummy atoms
#
mol = gto.M(atom='''
C     0.000000    0.000000            -0.542500
O     0.000000    0.000000             0.677500
H     0.000000    0.9353074360871938  -1.082500
H     0.000000   -0.9353074360871938  -1.082500
X-C   0.000000    0.000000            -1.5
X-O   0.000000    0.000000             1.6
    ''',
    verbose = 4)

sol = solvent.ddCOSMO(mol)
cavity_radii = sol.get_atomic_radii()

cavity_radii[4] = 3.0  # Bohr, for X-C
cavity_radii[5] = 2.5  # Bohr, for X-O
# Overwrite the get_atom_radii method to feed the custom cavity into the solvent model
sol.get_atomic_radii = lambda: cavity_radii

mf = mol.RHF().ddCOSMO(sol)
mf.run()


#
# Case 2. Cavity for dummy atoms (without basis)
#
mol = gto.M(atom='''
C     0.000000    0.000000            -0.542500
O     0.000000    0.000000             0.677500
H     0.000000    0.9353074360871938  -1.082500
H     0.000000   -0.9353074360871938  -1.082500
    ''',
    verbose = 4)

# Use a MM molecule to define cavity from dummy atoms.
# See also the example 22-with_qmmm.py
coords = numpy.array([
    [0, 0, -1.5],
    [0, 0, 1.6],
])
charges = numpy.array([0, 0])
mm_atoms = [('X', c) for c in coords]
mm_mol = qmmm.create_mm_mol(mm_atoms, charges)

# Make a giant system include both QM and MM particles
qmmm_mol = mol + mm_mol

# The solvent model is based on the giant system
sol = solvent.ddCOSMO(qmmm_mol)
cavity_radii = sol.get_atomic_radii()

# Custom cavity
cavity_radii[4] = 3.0  # Bohr
cavity_radii[5] = 2.5  # Bohr
# Overwrite the get_atom_radii method to feed the custom cavity into the solvent model
sol.get_atomic_radii = lambda: cavity_radii

mf = mol.RHF().QMMM(coords, charges)
mf = mf.ddCOSMO(sol)
mf.run()
|
|
97ecb8f7dbcb36cfa9e2d180f29d29002eea127e
|
add elasticsearch import
|
examples/ElasticsearchIntegrationWithSpark/import_from_elasticsearch.py
|
examples/ElasticsearchIntegrationWithSpark/import_from_elasticsearch.py
|
Python
| 0 |
@@ -0,0 +1,2082 @@
+#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

from __future__ import print_function

import sys
from operator import add

from pyspark import SparkContext
from pyspark import SparkConf
from pyspark.sql import SQLContext

if __name__ == "__main__":
    if len(sys.argv) != 7:
        print("Usage: export_to_elasticsearch.py <host> <port> <user> <pass> <tmpdir> <tmpHdfsDir>", file=sys.stderr)
        exit(-1)

    host = sys.argv[1]
    port = sys.argv[2]
    user = sys.argv[3]
    password = sys.argv[4]
    tmpDir = sys.argv[5]
    tmpHdfsDir = sys.argv[6]

    conf = SparkConf().setAppName("Elasticsearch example")

    # see https://www.elastic.co/guide/en/elasticsearch/hadoop/current/configuration.html
    conf.set("es.nodes",host)
    conf.set("es.port",str(port))
    conf.set("es.net.http.auth.user",user)
    conf.set("es.net.http.auth.pass",password)
    conf.set("es.net.ssl","true")
    conf.set("es.net.ssl.truststore.location","truststore.jks")
    conf.set("es.net.ssl.truststore.pass","mypassword")
    conf.set("es.nodes.wan.only","true")

    sc = SparkContext(conf=conf)
    sqlContext = SQLContext(sc)

    # read the data from elasticsearch
    esdata = sqlContext.read.format("es").load("spark/{0}".format(tmpDir))

    # save to hdfs
    esdata.rdd.saveAsTextFile(tmpHdfsDir)

    sc.stop()
|
|
8b419fefc93f9084b8d504b7382fd51087e4645f
|
add migration script that removes table 'regressions'
|
benchbuild/db/versions/001_Remove_RegressionTest_table.py
|
benchbuild/db/versions/001_Remove_RegressionTest_table.py
|
Python
| 0.000138 |
@@ -0,0 +1,846 @@
+"""
Remove unneeded Regressions table.

This table can and should be reintroduced by an experiment that requires it.
"""
from sqlalchemy import Table, Column, ForeignKey, Integer, String
from benchbuild.utils.schema import metadata

META = metadata()
REGRESSION = Table('regressions', META,
                   Column(
                       'run_id',
                       Integer,
                       ForeignKey(
                           'run.id', onupdate="CASCADE", ondelete="CASCADE"),
                       index=True,
                       primary_key=True), Column('name', String),
                   Column('module', String), Column('project_name', String))


def upgrade(migrate_engine):
    META.bind = migrate_engine
    REGRESSION.drop()


def downgrade(migrate_engine):
    META.bind = migrate_engine
    REGRESSION.create()
|
|
847232f2890a4700e4983cd971ef2cd1a76a4b1d
|
rebuild cases
|
corehq/apps/cleanup/management/commands/rebuild_cases.py
|
corehq/apps/cleanup/management/commands/rebuild_cases.py
|
Python
| 0.000002 |
@@ -0,0 +1,1235 @@
+from __future__ import print_function
from __future__ import absolute_import
from __future__ import unicode_literals
import logging
from django.core.management.base import BaseCommand

from corehq.form_processor.backends.sql.processor import FormProcessorSQL
from corehq.form_processor.models import RebuildWithReason


logger = logging.getLogger('rebuild_cases')
logger.setLevel('DEBUG')


class Command(BaseCommand):
    help = ('Rebuild given cases')

    def add_arguments(self, parser):
        parser.add_argument('domain')
        parser.add_argument('cases_csv_file')

    def handle(self, domain, cases_csv_file, **options):
        cases = []
        with open(cases_csv_file, 'r') as f:
            lines = f.readlines()
            cases = [l.strip() for l in lines]

        rebuild_cases(domain, cases, logger)


def rebuild_cases(domain, cases, logger):
    detail = RebuildWithReason(reason='undo UUID clash')
    for case_id in cases:
        try:
            FormProcessorSQL.hard_rebuild_case(domain, case_id, detail)
            logger.info('Case %s rebuilt' % case_id)
        except Exception as e:
            logger.error("Exception rebuilding case %s".format(case_id))
            logger.exception("message")
|
|
0919661333c8099a85e7c12c6ce9393ced8c985b
|
create the lib directory to hold vendored libraries
|
ceph_deploy/lib/__init__.py
|
ceph_deploy/lib/__init__.py
|
Python
| 0 |
@@ -0,0 +1,272 @@
+"""
This module is meant for vendorizing Python libraries. Most libraries will need
to have some ``sys.path`` alterations done unless they are doing relative
imports.

Do **not** add anything to this module that does not represent a vendorized
library.
"""

import remoto
|
|
6303ffeee0118a2fef1cb0a9abfe931a04ee6974
|
Fix web app. #79
|
channelworm/web_app/wsgi.py
|
channelworm/web_app/wsgi.py
|
Python
| 0.000048 |
@@ -0,0 +1,389 @@
+"""
WSGI config for myproject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/howto/deployment/wsgi/
"""

import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "web_app.settings")

from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
|
|
e5ed4497fd8aee709dd441cfcddc9a1a91c538d4
|
add theilsen
|
chart-02-theilsen-median-of-root-median-squared-errors.py
|
chart-02-theilsen-median-of-root-median-squared-errors.py
|
Python
| 0.999936 |
@@ -0,0 +1,1238 @@
+# create files for chart-02-theilsen-median-of-root-mdian-squared-errors
# with these choices
#  metric in median-root-median-squared-errors
#  model in theilsen
#  ndays in 30 60 ... 360
#  predictors in act actlog ct ctlog
#  responses in price logprice
#  usetax in yes no
#  year in 2008
# invocations and files created
#  python chart-02X.py makefile -> src/chart-02X.makefile
#  python chart-02X.py data -> data/working/chart-02X.data
#  python chart-02X.py txt -> data/working/chart-02X.txt
#  python chart-02X.py txtY -> data/working/chart-02X-Y.txt

import sys


from Bunch import Bunch
from chart_02_template import chart


def main():
    specs = Bunch(metric='median-of-root-median-squared-errors',
                  title='Median of Root Median Squared Errors',
                  model='theilsen',
                  training_periods=['30', '60', '90', '120', '150', '180',
                                    '210', '240', '270', '300', '330', '360'],
                  feature_sets=['act', 'actlog', 'ct', 'ctlog'],
                  responses=['price', 'logprice'],
                  year='2008')
    chart(specs=specs,
          argv=sys.argv)


if __name__ == '__main__':
    main()
|
|
2df737f2690925e2752ae7633f1db05f952209bc
|
Create led_record.py
|
led_record.py
|
led_record.py
|
Python
| 0.000001 |
@@ -0,0 +1,1588 @@
+#!/usr/bin/env python
import RPi.GPIO as GPIO
from time import sleep
import os
import subprocess

# Setup getting an image
def get_video(state):
    folderName = "/home/pi/HumphreyData/"
	if os.path.isdir(folderName)== False:
	    os.makedirs(folderName)
    fileNumber = 1
    filePath = folderName + str(fileNumber) + ".h264"
    while os.path.isfile(filePath):
        fileNumber += 1
        filePath = folderName + str(fileNumber) + ".h264"

    fileName = str(fileNumber)
	cmdStr = "sudo raspivid -n -w 1024 -h 768 -t 0 -fps 2 -o %s/%s.h264" %(folderName, fileName)
    if state:
        capture = subprocess.Popen(cmdStr, shell=True)
    else:
        pid = "sudo pkill -15 -f raspivid"
        os.system(pid)

# Setup LED control
def switch_LED(state):
	for item in LEDpins:
        GPIO.output(item, state)

# Setup GPIO config
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BOARD)

# Setup GPIO pins
LEDpins = [19, 21]
switchState = 23

# If true, LEDS are off -> GPIO pins are current sinks
lOn = False
lOff = True

# Configure LED GPIO pins
for item in LEDpins:
    GPIO.setup(item, GPIO.OUT)
	GPIO.output(item, lOff)

# Configure switch GPIO pins
GPIO.setup(switchState, GPIO.IN, pull_up_down=GPIO.PUD_UP)

# Scipt ready flash
flashes = 1
while flashes < 4:
    switch_LED(lOn)
	sleep(0.5)
	switch_LED(lOff)
	sleep(0.5)
	flashes += 1

# Pin check loop
while True:
    if GPIO.input(switchState):
        captureState = False
        switch_LED(lOff)
    else:
        captureState = True
        switch_LED(lOn)

    get_video(captureState)
    GPIO.wait_for_edge(switchState, GPIO.BOTH)
    sleep(0.2)

# Script cleanup
GPIO.cleanup()
|
|
4567d9d710654ef47f971821a9bec79a41515264
|
Make call_local test a bit more complex.
|
distarray/tests/test_odin.py
|
distarray/tests/test_odin.py
|
"""
Test Odin extensions to distarray.
To run these tests, you must have an ipcluster running
For example,
$ ipcluster start -n <n> --engines=MPIEngineSetLauncher
"""
import unittest
import numpy as np
from distarray import odin
@odin.local
def assert_allclose(da, db):
assert np.allclose(da, db), "Arrays not equal within tolerance."
@odin.local
def local_sin(da):
return np.sin(da)
@odin.local
def local_add50(da):
return da + 50
@odin.local
def local_sum(da):
return np.sum(da.get_localarray())
@odin.local
def local_add_num(da, num):
return da + num
@odin.local
def local_add_nums(da, num1, num2, num3):
return da + num1 + num2 + num3
@odin.local
def local_add_distarrayproxies(da, dg):
return da + dg
@odin.local
def local_add_mixed(da, num1, dg, num2):
return da + num1 + dg + num2
@odin.local
def local_add_ndarray(da, num, ndarr):
return da + num + ndarr
@odin.local
def local_add_kwargs(da, num1, num2=55):
return da + num1 + num2
@odin.local
def local_add_supermix(da, num1, db, num2, dc, num3=99, num4=66):
return da + num1 + db + num2 + dc + num3 + num4
@odin.local
def local_none(da):
return None
@odin.local
def call_local(da):
return local_add50(da)
class TestLocal(unittest.TestCase):
def setUp(self):
odin.context._execute('import numpy as np')
self.da = odin.context.empty((1024, 1024))
self.da.fill(2 * np.pi)
def test_local_sin(self):
db = local_sin(self.da)
assert_allclose(db, 0)
def test_local_add(self):
dc = local_add50(self.da)
assert_allclose(dc, 2 * np.pi + 50)
def test_local_sum(self):
dd = local_sum(self.da)
client_dd = np.array(odin.view.pull(dd.key))
shape = self.da.get_localshapes()[0]
sum_val = shape[0] * shape[1] * (2 * np.pi)
assert_allclose(client_dd, sum_val)
def test_local_add_num(self):
de = local_add_num(self.da, 11)
assert_allclose(de, 2 * np.pi + 11)
def test_local_add_nums(self):
df = local_add_nums(self.da, 11, 12, 13)
assert_allclose(df, 2 * np.pi + 11 + 12 + 13)
def test_local_add_distarrayproxies(self):
dg = odin.context.empty((1024, 1024))
dg.fill(33)
dh = local_add_distarrayproxies(self.da, dg)
assert_allclose(dh, 33 + 2 * np.pi)
def test_local_add_mixed(self):
di = odin.context.empty((1024, 1024))
di.fill(33)
dj = local_add_mixed(self.da, 11, di, 12)
assert_allclose(dj, 2 * np.pi + 11 + 33 + 12)
@unittest.skip('Locally adding ndarrays not supported.')
def test_local_add_ndarray(self):
shp = self.da.get_localshapes()[0]
ndarr = np.empty(shp)
ndarr.fill(33)
dk = local_add_ndarray(self.da, 11, ndarr)
assert_allclose(dk, 2 * np.pi + 11 + 33)
def test_local_add_kwargs(self):
dl = local_add_kwargs(self.da, 11, num2=12)
assert_allclose(dl, 2 * np.pi + 11 + 12)
def test_local_add_supermix(self):
dm = odin.context.empty((1024, 1024))
dm.fill(22)
dn = odin.context.empty((1024, 1024))
dn.fill(44)
do = local_add_supermix(self.da, 11, dm, 33, dc=dn, num3=55)
expected = 2 * np.pi + 11 + 22 + 33 + 44 + 55 + 66
assert_allclose(do, expected)
@unittest.skip("Doesn't know what to do with NoneType")
def test_local_none(self):
dp = local_none(self.da)
client_dp = odin.view.pull(dp.key)
print client_dp
def test_call_local(self):
dq = call_local(self.da)
assert_allclose(dq, 2 * np.pi + 50)
if __name__ == '__main__':
unittest.main(verbosity=2)
|
Python
| 0 |
@@ -1221,22 +1221,20 @@
 a):
-return
+db =
 local_a
@@ -1242,16 +1242,61 @@
 d50(da)
+    dc = local_add_num(db, 99)
    return dc


class
@@ -3692,16 +3692,21 @@
 .pi + 50
+ + 99
 )


if _
|
f68689e3b6caaad2d143d92af5395f7c12316525
|
add simple test file
|
test.py
|
test.py
|
Python
| 0 |
@@ -0,0 +1,1552 @@
+from __future__ import division
import numpy as np
import matplotlib.pyplot as plt

from pybasicbayes.distributions import Gaussian, Regression
from autoregressive.distributions import AutoRegression
from pyhsmm.util.text import progprint_xrange

from models import LDS

np.random.seed(0)

#########################
#  set some parameters  #
#########################

mu_init = np.array([0.,1.])
sigma_init = 0.01*np.eye(2)

A = 0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],
                   [np.sin(np.pi/24), np.cos(np.pi/24)]])
# A = 0.99*np.eye(2)
sigma_states = 0.01*np.eye(2)

C = np.array([[10.,0.]])
sigma_obs = 0.01*np.eye(1)

###################
#  generate data  #
###################

truemodel = LDS(
    init_dynamics_distn=Gaussian(mu=mu_init,sigma=sigma_init),
    dynamics_distn=AutoRegression(A=A,sigma=sigma_states),
    emission_distn=Regression(A=C,sigma=sigma_obs)
    )

data, stateseq = truemodel.generate(2000)

###############
#  fit model  #
###############

model = LDS(
    init_dynamics_distn=Gaussian(mu_0=np.zeros(2),nu_0=3.,sigma_0=3*np.eye(2),kappa_0=1.),
    dynamics_distn=AutoRegression(nu_0=3.,S_0=np.eye(2),M_0=np.zeros((2,2)),K_0=5*np.eye(2)),
    emission_distn=Regression(nu_0=2.,S_0=np.eye(1),M_0=np.zeros((1,2)),K_0=5*np.eye(2)),
    )

model.add_data(data,stateseq=stateseq)
# model.add_data(data)

model.resample_parameters()
for _ in progprint_xrange(100):
    model.resample_model()

print np.linalg.eigvals(A)
print np.linalg.eigvals(model.dynamics_distn.A)
|
|
2e4bb9ca00c992dab0967b3238d8aebd8710d79d
|
Create controller.py
|
src/controller.py
|
src/controller.py
|
Python
| 0.000001 |
@@ -0,0 +1,72 @@
+#!/usr/bin/env python
import rospy

if __name__ == '__main__':
    pass
|
|
c3748579854ae06c995cb12ea45a1be4de8f827d
|
Add gallery migration
|
features/galleries/migrations/0003_auto_20170421_1109.py
|
features/galleries/migrations/0003_auto_20170421_1109.py
|
Python
| 0 |
@@ -0,0 +1,576 @@
+# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-04-21 09:09
from __future__ import unicode_literals

from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    dependencies = [
        ('galleries', '0002_auto_20170421_0934'),
    ]

    operations = [
        migrations.AlterField(
            model_name='galleryimage',
            name='gallery',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='gallery_images', to='content2.Content'),
        ),
    ]
|
|
a6381765ad8e15624a5dabb848283e92b0e90d8c
|
Create rpkm_bin.py
|
code_collection/rpkm_bin.py
|
code_collection/rpkm_bin.py
|
Python
| 0.000003 |
@@ -0,0 +1,934 @@
+import sys

peak=[]
with open(sys.argv[1],'r') as f:
	for line in f:
		line=line.strip('\n').split('\t')
		peak.append(line)

bed=[]
with open(sys.argv[2],'r') as f:
	for line in f:
		line=line.strip('\n').split('\t')
		bed.append(line)

SIZE=int(sys.argv[3])

index=0
n=len(peak)
num=[0]*n
for read in bed:
	mid=(int(read[1])+int(read[2]))/2
	while (index<n-1 and mid>int(peak[index][2])) or (index<n-1 and read[0]!=peak[index][0]):
		index+=1
	num[index]+=1
	if (index<n-1) and (mid==int(peak[index+1][1])):
		num[index+1]+=1

output=[]
for i in range(n):
	if num[i]!=0:
		y=1.0*num[i]*10**9/SIZE/(int(peak[i][2])-int(peak[i][1]))
		y='%.4f'%y
		output.append(peak[i][0]+'\t'+peak[i][1]+'\t'+peak[i][2]+'\t'+peak[i][3]+'\t'+str(num[i])+'\t'+y+'\n')
	else:
		output.append(peak[i][0]+'\t'+peak[i][1]+'\t'+peak[i][2]+'\t'+peak[i][3]+'\t'+str(num[i])+'\t'+str(0)+'\n')

with open('reads.txt','w') as f:
	f.writelines(output)
f.close()
|
|
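The y value computed in rpkm_bin.py is the standard RPKM normalization: reads in a region, scaled by 10^9, divided by both library size (SIZE) and region length. A worked instance of the same arithmetic, with illustrative numbers rather than real data:

reads = 50            # reads whose midpoint falls in the bin
library = 20000000    # SIZE: total mapped reads in the library
length = 1000         # bin width in bp (end - start)
rpkm = 1.0 * reads * 10**9 / library / length
print('%.4f' % rpkm)  # 2.5000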
85202173cf120caad603315cd57fa66857a88b0b
|
Add missing migrations for institutions
|
feder/institutions/migrations/0013_auto_20170810_2118.py
|
feder/institutions/migrations/0013_auto_20170810_2118.py
|
Python
| 0.000006 |
@@ -0,0 +1,535 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.2 on 2017-08-10 21:18%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0Aimport jsonfield.fields%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('institutions', '0012_auto_20170808_0309'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='institution',%0A name='extra',%0A field=jsonfield.fields.JSONField(blank=True, verbose_name=b'Unorganized additional information'),%0A ),%0A %5D%0A
|
|
4bb5653f5f7f95bf28b2ee596c441cbc4c7fbf3a
|
Create whitefilterstr.py
|
whitefilterstr.py
|
whitefilterstr.py
|
Python
| 0.000016 |
@@ -0,0 +1,539 @@
+def whiteListCharFilter(inStr, whiteListStr):%0A    %22%22%22 Sanitize a string with a list of allowed (white) characters%0A%0A    Input: inStr %7Bstring%7D String to be sanitized.%0A    Input: whiteListStr %7Bstring%7D String with allowed characters.%0A    Output: outStr %7Bstring%7D Sanitized string%0A    %22%22%22%0A    outStr = %22%22%0A    # Both arguments must be strings, otherwise return the empty result.%0A    if not (isinstance(inStr, str) and isinstance(whiteListStr, str)):%0A        return outStr%0A    for characterStr in inStr:%0A        if characterStr in whiteListStr:%0A            outStr = outStr + characterStr%0A    return outStr%0A    %0A
|
|
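With the guard and the parameter typo fixed, whiteListCharFilter is a pure function that can be exercised directly; a made-up example that keeps only digits and dashes:

phone = whiteListCharFilter('call: +1 (555) 867-5309', '0123456789-')
print(phone)  # 1555867-5309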
4bfb560dc9f28d850a89c98590df032849cfc035
|
Create zoql.py
|
zoql.py
|
zoql.py
|
Python
| 0.000164 |
@@ -0,0 +1,1180 @@
+#!/usr/local/bin/python3%0A%0Aimport sys%0Aimport cmd%0Aimport csv%0A%0Aimport pdb%0A%0Aimport config%0Afrom zuora import Zuora%0A%0Azuora = Zuora(config.zuoraConfig)%0A%0A%0Adef zuoraObjectKeys(zuoraObject):%0A    if zuoraObject:%0A        return zuoraObject.keys()%0A    %0Adef dumpRecords(records):%0A    if records:%0A        firstRecord = records%5B0%5D%0A        keys = %5Bkey for key in zuoraObjectKeys(firstRecord) if firstRecord%5Bkey%5D%5D%0A        %0A        print(','.join(keys))%0A        %0A        for record in records:%0A            print(','.join(str(record%5Bkey%5D) for key in keys))%0A        %0A        print(len(records), 'records')%0A%0Aclass Interpreter(cmd.Cmd):%0A    def do_select(self, line):%0A        try:%0A            if '.' in line:%0A                csvData = zuora.queryExport('select ' + line).split('%5Cn')%0A                records = %5Brecord for record in csv.DictReader(csvData)%5D%0A            else:%0A                records = zuora.queryAll('select ' + line)%0A            dumpRecords(records)%0A        except Exception as e:%0A            print('Error:', repr(e))%0A%0A    def do_q(self, line):%0A        return self.do_EOF(line)%0A%0A    def do_EOF(self, line):%0A        return True%0A%0Aif __name__ == '__main__':%0A    Interpreter().cmdloop()%0A
|
|
a7a16e4317b9b61d3e01b8ec54ad499d73a3a600
|
Fix tracebacks
|
homeassistant/components/upnp/config_flow.py
|
homeassistant/components/upnp/config_flow.py
|
"""Config flow for UPNP."""
import voluptuous as vol
from homeassistant import config_entries
from homeassistant import data_entry_flow
from .const import (
CONF_ENABLE_PORT_MAPPING, CONF_ENABLE_SENSORS,
CONF_SSDP_DESCRIPTION, CONF_UDN
)
from .const import DOMAIN
def ensure_domain_data(hass):
"""Ensure hass.data is filled properly."""
hass.data[DOMAIN] = hass.data.get(DOMAIN, {})
hass.data[DOMAIN]['devices'] = hass.data[DOMAIN].get('devices', {})
hass.data[DOMAIN]['sensors'] = hass.data[DOMAIN].get('sensors', {})
hass.data[DOMAIN]['discovered'] = hass.data[DOMAIN].get('discovered', {})
hass.data[DOMAIN]['auto_config'] = hass.data[DOMAIN].get('auto_config', {
'active': False,
'port_forward': False,
'sensors': False,
'ports': {'hass': 'hass'},
})
@config_entries.HANDLERS.register(DOMAIN)
class UpnpFlowHandler(data_entry_flow.FlowHandler):
"""Handle a Hue config flow."""
VERSION = 1
@property
def _configured_upnp_igds(self):
"""Get all configured IGDs."""
return {
entry.data[CONF_UDN]: {
'udn': entry.data[CONF_UDN],
}
for entry in self.hass.config_entries.async_entries(DOMAIN)
}
@property
def _discovered_upnp_igds(self):
"""Get all discovered entries."""
return self.hass.data[DOMAIN]['discovered']
def _store_discovery_info(self, discovery_info):
"""Add discovery info."""
udn = discovery_info['udn']
self.hass.data[DOMAIN]['discovered'][udn] = discovery_info
def _auto_config_settings(self):
"""Check if auto_config has been enabled."""
return self.hass.data[DOMAIN]['auto_config']
async def async_step_discovery(self, discovery_info):
"""
Handle a discovered UPnP/IGD.
This flow is triggered by the discovery component. It will check if the
host is already configured and delegate to the import step if not.
"""
ensure_domain_data(self.hass)
# store discovered device
discovery_info['friendly_name'] = \
'{} ({})'.format(discovery_info['host'], discovery_info['name'])
self._store_discovery_info(discovery_info)
# ensure not already discovered/configured
udn = discovery_info['udn']
if udn in self._configured_upnp_igds:
return self.async_abort(reason='already_configured')
# auto config?
auto_config = self._auto_config_settings()
if auto_config['active']:
import_info = {
'name': discovery_info['friendly_name'],
'sensors': auto_config['sensors'],
'port_forward': auto_config['port_forward'],
}
return await self._async_save_entry(import_info)
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Manual set up."""
# if user input given, handle it
user_input = user_input or {}
if 'name' in user_input:
if not user_input['sensors'] and not user_input['port_forward']:
return self.async_abort(reason='no_sensors_or_port_forward')
# ensure not already configured
configured_names = [
entry['friendly_name']
for udn, entry in self._discovered_upnp_igds.items()
if udn in self._configured_upnp_igds
]
if user_input['name'] in configured_names:
return self.async_abort(reason='already_configured')
return await self._async_save_entry(user_input)
# let user choose from all discovered, non-configured, UPnP/IGDs
names = [
entry['friendly_name']
for udn, entry in self._discovered_upnp_igds.items()
if udn not in self._configured_upnp_igds
]
if not names:
return self.async_abort(reason='no_devices_discovered')
return self.async_show_form(
step_id='user',
data_schema=vol.Schema({
vol.Required('name'): vol.In(names),
vol.Optional('sensors', default=False): bool,
vol.Optional('port_forward', default=False): bool,
})
)
async def async_step_import(self, import_info):
"""Import a new UPnP/IGD as a config entry."""
return await self._async_save_entry(import_info)
async def _async_save_entry(self, import_info):
"""Store UPNP/IGD as new entry."""
# ensure we know the host
name = import_info['name']
discovery_infos = [info
for info in self._discovered_upnp_igds.values()
if info['friendly_name'] == name]
if not discovery_infos:
return self.async_abort(reason='host_not_found')
discovery_info = discovery_infos[0]
return self.async_create_entry(
title=discovery_info['name'],
data={
CONF_SSDP_DESCRIPTION: discovery_info['ssdp_description'],
CONF_UDN: discovery_info['udn'],
CONF_ENABLE_SENSORS: import_info['sensors'],
CONF_ENABLE_PORT_MAPPING: import_info['port_forward'],
},
)
|
Python
| 0.000015 |
@@ -2963,24 +2963,63 @@
set up.%22%22%22%0A
+ ensure_domain_data(self.hass)%0A%0A
# if
@@ -4465,32 +4465,71 @@
onfig entry.%22%22%22%0A
+ ensure_domain_data(self.hass)%0A%0A
return a
@@ -4657,32 +4657,71 @@
s new entry.%22%22%22%0A
+ ensure_domain_data(self.hass)%0A%0A
# ensure
|
eb250318cf6933b4a037bd9ea238ce0fc7be58c2
|
add first script
|
gitthemall.py
|
gitthemall.py
|
Python
| 0.000018 |
@@ -0,0 +1,1214 @@
+#! /usr/bin/env python2%0Aimport argparse%0Aimport os.path%0Aimport logging%0Aimport sys%0A%0Alogging.basicConfig(format='%25(levelname)s: %25(message)s')%0A%0Adef fail(msg):%0A 'Fail program with printed message'%0A logging.error(msg)%0A sys.exit(1)%0A%0Adef update(repo, actions):%0A 'Update repo according to allowed actions.'%0A repo = os.path.expanduser(repo)%0A logging.debug('going to %25s' %25 repo)%0A if not os.path.isdir(repo):%0A fail('No directory at %25s!' %25 repo)%0A if not os.path.isdir(os.path.join(repo, '.git')):%0A fail('No git repo at %25s!' %25 repo)%0A %0Adef parse(config):%0A 'Parse config and yield repos with actions'%0A with open(config) as f:%0A for line in f:%0A items = line.strip().split(',')%0A yield items%5B0%5D, items%5B1:%5D%0A%0Aif __name__ == '__main__':%0A parser = argparse.ArgumentParser(description='Keep git repos up-to-date.')%0A parser.add_argument('config', type=str, help='config file that lists repos')%0A parser.add_argument('-v', '--verbose', default=False, action='store_true')%0A args = parser.parse_args()%0A%0A if args.verbose:%0A logging.getLogger().setLevel(logging.DEBUG)%0A%0A for repo, actions in parse(args.config):%0A update(repo, actions)%0A
|
|
f9ea992353f2caa835ca2007eb07b470d1b782a3
|
Fix colorfield migration
|
geotrek/trekking/migrations/0006_practice_mobile_color.py
|
geotrek/trekking/migrations/0006_practice_mobile_color.py
|
Python
| 0.000002 |
@@ -0,0 +1,606 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.14 on 2019-03-04 12:43%0Afrom __future__ import unicode_literals%0A%0Aimport colorfield.fields%0Afrom django.db import migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('trekking', '0005_auto_20181219_1524'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='practice',%0A name='mobile_color',%0A field=colorfield.fields.ColorField(db_column=b'couleur_mobile', default=b'#444444', help_text=%22Color's practice in mobile%22, max_length=18, verbose_name='Mobile color'),%0A ),%0A %5D%0A
|
|
96b7a859e04673af63732b061298b3b852b08250
|
Fix filetransfer version
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
import bluebottle
from setuptools import setup, find_packages
def read_file(name):
return open(os.path.join(os.path.dirname(__file__), name)).read()
readme = read_file('README.rst')
changes = ''
dependency_links = [
'git+https://github.com/onepercentclub/django-taggit-autocomplete-modified.git@8e7fbc2deae2f1fbb31b574bc8819d9ae7c644d6#egg=django-taggit-autocomplete-modified-0.1.1b1',
'git+https://github.com/onepercentclub/django-fluent-contents.git@8439c7ffc1ba8877247aa7d012928c9bb170dc79#egg=fluent_contents-1.0c3',
'git+https://github.com/onepercentclub/[email protected]#egg=django-bb-salesforce-1.1.18',
'git+https://github.com/onepercentclub/[email protected]#egg=django-tenant-extras-2.0.0',
'git+https://github.com/onepercentclub/[email protected]#egg=django-token-auth-0.2.16'
]
install_requires = [
'Babel==2.3.4',
'BeautifulSoup==3.2.1',
'Django==1.9.6',
'Pillow==3.2.0',
'South==1.0.2',
'Sphinx==1.4.1',
'bunch==1.0.1',
'celery==3.1.23',
'django-celery==3.1.17',
'django-choices==1.4.2',
'django-extensions==1.3.0',
'django-exportdb==0.4.6',
'django-filter==0.13.0',
'django-geoposition==0.2.2',
'django-localflavor==1.2',
'django-modeltranslation==0.11',
'django-taggit==0.18.3',
'django-tinymce==2.3.0',
'django-uuidfield==0.5.0',
'django-wysiwyg==0.7.1',
'django-dynamic-fixture==1.8.5',
'django-fluent-dashboard==0.3.2',
'djangorestframework==3.3.3',
'dkimpy==0.5.6',
'micawber==0.3.3',
'requests==2.5.1',
'sorl-thumbnail==12.3',
'transifex-client==0.11',
'django-tools==0.30.0',
'django-loginas==0.1.9',
'beautifulsoup4==4.4.1',
'psycopg2==2.6.1',
'django-fsm==2.4.0',
'suds-jurko==0.6',
'django-ipware==1.1.5',
'pygeoip==0.3.2',
'python-social-auth==0.2.12',
'python-memcached==1.57',
'lxml==3.6.0',
'unicodecsv==0.14.1',
'python-dateutil==2.5.3',
'gunicorn==19.5.0',
'surlex==0.2.0',
'django_polymorphic==0.9.2',
'fabric',
'django-tenant-schemas==1.5.8',
'raven==5.16.0',
'regex==2016.4.25',
'djangorestframework-jwt==1.8.0',
'django-filetransfers==0.1.0',
'django-admin-tools==0.7.2',
'django-rest-swagger==0.3.6',
'django-lockdown==1.2',
'mixpanel==4.3.0',
'wheel==0.29.0',
# Github requirements
'django-taggit-autocomplete-modified==0.1.1b1',
'django-fluent-contents==1.1.4',
'django-bb-salesforce==1.1.18',
'django-tenant-extras==2.0.0',
'django-token-auth==0.2.16',
]
tests_requires = [
'httmock==1.2.5',
'coverage==4.0.3',
'django-nose==1.4.3',
'django-setuptest==0.2.1',
'factory-boy==2.7.0',
'mock==2.0.0',
'nose==1.3.7',
'pylint==1.5.5',
'tdaemon==0.1.1',
'WebTest==2.0.21',
'django-webtest==1.7.9',
'pyquery==1.2.13'
]
dev_requires = [
'ipdb'
]
setup(
name='bluebottle',
version=bluebottle.__version__,
license='BSD',
# Packaging.
packages=find_packages(exclude=('tests', 'tests.*')),
install_requires=install_requires,
dependency_links=dependency_links,
# You can install these using the following syntax, for example:
# $ pip install -e .[dev,test]
extras_require={
'dev': dev_requires,
'test': tests_requires,
},
include_package_data=True,
zip_safe=False,
# Metadata for PyPI.
description='Bluebottle, the crowdsourcing framework initiated by the 1%Club.',
long_description='\n\n'.join([readme, changes]),
author='1%Club',
author_email='[email protected]',
platforms=['any'],
url='https://github.com/onepercentclub/bluebottle',
classifiers=[
'Development Status :: 5 - Production/Stable',
'Framework :: Django',
'Intended Audience :: Developers',
'Operating System :: Unix',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Application Frameworks'
]
)
|
Python
| 0.000001 |
@@ -880,16 +880,155 @@
-0.2.16'
+,%0A 'hg+https://bitbucket.org/jdiascarvalho/django-filetransfers@89c8381764da217d72f1fa396ce3929f0762b8f9#egg=django-filetransfers-0.1.1'
%0A%5D%0A%0Ainst
@@ -2407,17 +2407,17 @@
rs==0.1.
-0
+1
',%0A '
|
3959ad4a4ddc4655c1acd8362de4284ba1e8d3e7
|
Apply the hack that renames local_settings.py only when running setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import setup, find_packages
from setuptest import test
import os
'''
Rename local_settings.py in order to
be excluded from setup.py install command
'''
ORIG_NAME = 'cronos/local_settings.py'
TEMP_NAME = 'cronos/local_settings.py1'
try:
os.rename(ORIG_NAME, TEMP_NAME)
except:
pass
setup(
name='cronos',
version='0.3-dev',
description='Django application that collects announcements and other \
personal data for students of TEI of Larissa',
author='cronos development team',
author_email='[email protected]',
url='http://cronos.teilar.gr',
license='AGPLv3',
packages=find_packages(),
include_package_data=True,
data_files=[
('', ['LICENSE', 'manage.py']),
('bin', [
'bin/update_cronos.sh',
'bin/logs_create_fix_perms.sh',
'bin/get_full_production_db.sh'
]),
('configs', [
'configs/apache.conf',
'configs/cron.d_cronos',
'configs/logrotate.d_cronos',
'configs/logrotate.d_cronos-dev',
'configs/syslog-ng.conf'
]),
],
cmdclass={'test': test},
)
'''
Restore local_settings.py
'''
try:
os.rename(TEMP_NAME, ORIG_NAME)
except:
pass
|
Python
| 0.000001 |
@@ -96,22 +96,64 @@
mport os
-%0A%0A'''%0A
+, sys%0A%0Aif sys.argv%5B1%5D == 'install':%0A '''%0A
Rename l
@@ -181,16 +181,20 @@
rder to%0A
+
be exclu
@@ -227,20 +227,28 @@
command%0A
-'''%0A
+ '''%0A
ORIG_NAM
@@ -278,16 +278,20 @@
ngs.py'%0A
+
TEMP_NAM
@@ -322,16 +322,20 @@
gs.py1'%0A
+
try:%0A
@@ -327,32 +327,36 @@
1'%0A try:%0A
+
+
os.rename(ORIG_N
@@ -367,32 +367,36 @@
TEMP_NAME)%0A
+
except:%0A
pass%0A%0Ase
@@ -375,32 +375,36 @@
ME)%0A except:%0A
+
pass%0A%0Asetup(
@@ -1243,20 +1243,57 @@
st%7D,%0A)%0A%0A
-'''%0A
+if sys.argv%5B1%5D == 'install':%0A '''%0A
Restore
@@ -1314,17 +1314,29 @@
.py%0A
-'''%0A
+ '''%0A
try:%0A
+
@@ -1367,24 +1367,28 @@
G_NAME)%0A
+
except:%0A
pass
@@ -1375,21 +1375,25 @@
except:%0A
+
pass%0A
|
1c181eb7f9987d2147df48a762d34895593f031a
|
Add version for torch dependency
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import io
import os
import shutil
import subprocess
from pathlib import Path
import distutils.command.clean
from setuptools import setup, find_packages
from build_tools import setup_helpers
ROOT_DIR = Path(__file__).parent.resolve()
def read(*names, **kwargs):
with io.open(ROOT_DIR.joinpath(*names), encoding=kwargs.get("encoding", "utf8")) as fp:
return fp.read()
def _get_version():
version = '0.9.0a0'
sha = None
try:
cmd = ['git', 'rev-parse', 'HEAD']
sha = subprocess.check_output(cmd, cwd=str(ROOT_DIR)).decode('ascii').strip()
except Exception:
pass
if os.getenv('BUILD_VERSION'):
version = os.getenv('BUILD_VERSION')
elif sha is not None:
version += '+' + sha[:7]
if sha is None:
sha = 'Unknown'
return version, sha
def _export_version(version, sha):
version_path = ROOT_DIR / 'torchtext' / 'version.py'
with open(version_path, 'w') as fileobj:
fileobj.write("__version__ = '{}'\n".format(version))
fileobj.write("git_version = {}\n".format(repr(sha)))
VERSION, SHA = _get_version()
_export_version(VERSION, SHA)
print('-- Building version ' + VERSION)
class clean(distutils.command.clean.clean):
def run(self):
# Run default behavior first
distutils.command.clean.clean.run(self)
# Remove torchtext extension
for path in (ROOT_DIR / 'torchtext').glob('**/*.so'):
print(f'removing \'{path}\'')
path.unlink()
# Remove build directory
build_dirs = [
ROOT_DIR / 'build',
ROOT_DIR / 'third_party' / 'build',
]
for path in build_dirs:
if path.exists():
print(f'removing \'{path}\' (and everything under it)')
shutil.rmtree(str(path), ignore_errors=True)
setup_info = dict(
# Metadata
name='torchtext',
version=VERSION,
author='PyTorch core devs and James Bradbury',
author_email='[email protected]',
url='https://github.com/pytorch/text',
description='Text utilities and datasets for PyTorch',
long_description=read('README.rst'),
license='BSD',
install_requires=[
'tqdm', 'requests', 'torch', 'numpy'
],
python_requires='>=3.5',
classifiers=[
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3 :: Only',
],
# Package info
packages=find_packages(exclude=('test*', 'build_tools*')),
zip_safe=False,
# Extension info
# If you are trying to use torchtext.so and see no registered op.
# See here: https://github.com/pytorch/vision/issues/2134"
ext_modules=setup_helpers.get_ext_modules(),
cmdclass={
'build_ext': setup_helpers.BuildExtension.with_options(no_python_abi_suffix=True),
'clean': clean,
},
)
setup(**setup_info)
|
Python
| 0 |
@@ -1208,16 +1208,201 @@
RSION)%0A%0A
+pytorch_package_version = os.getenv('PYTORCH_VERSION')%0A%0Apytorch_package_dep = 'torch'%0Aif pytorch_package_version is not None:%0A pytorch_package_dep += %22==%22 + pytorch_package_version%0A%0A
%0Aclass c
@@ -2437,23 +2437,35 @@
uests',
-'
+py
torch
-'
+_package_dep
, 'numpy
|
b187e844d667b14dcc7874b351ee3f82383be348
|
Fix dependency reference error
|
setup.py
|
setup.py
|
import ast
import re
from setuptools import setup, find_packages
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('puckdb/__init__.py', 'rb') as f:
version = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name='puckdb',
author='Aaron Toth',
version=version,
url='https://github.com/aaront/puckdb',
description='An async-first hockey data extractor and API',
long_description=open('README.rst').read(),
test_suite="tests",
include_package_data=True,
packages=find_packages(),
package_data={'': ['LICENSE']},
package_dir={'puckdb': 'puckdb'},
license='Apache 2.0',
install_requires=[
'aiodns',
'cchardet',
'aiohttp',
'aiodns',
'dotenv',
'asyncpg',
'asyncpgsa',
'click',
'click-datetime',
'python-dateutil',
'pytz',
'pg8000',
'sqlalchemy',
'ujson',
'python-dotenv',
'dataclasses',
'alembic',
'pint'
],
entry_points='''
[console_scripts]
puckdb=puckdb.console:main
''',
classifiers=(
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python',
'Programming Language :: Python :: 3.7',
'Topic :: Software Development :: Libraries'
)
)
|
Python
| 0.000007 |
@@ -772,26 +772,8 @@
s',%0A
- 'dotenv',%0A
|
b782a5af281a9fc841477d2360197c6e35ee794d
|
Version bump (there will be a day when I remember earlier)
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from distutils.core import setup
setup(name='mongodbforms',
version='0.1d',
description="An implementation of django forms using mongoengine.",
author='Jan Schrewe',
author_email='[email protected]',
url='http://www.schafproductions.com',
packages=['mongodbforms',],
package_data={
},
)
|
Python
| 0 |
@@ -93,17 +93,17 @@
ion='0.1
-d
+e
',%0A d
|
40c6a07808be26de0534a5b6f47ef28f591a500c
|
bump again
|
setup.py
|
setup.py
|
from setuptools import setup, find_packages
requires = []
dep_links = []
for dep in open('requirements.txt').read().split("\n"):
if dep.startswith('git+'):
dep_links.append(dep)
else:
requires.append(dep)
setup(
name='django-suave',
version="0.5.6",
description='Rather nice pages.',
long_description=open('README.rst').read(),
url='https://github.com/radiosilence/django-suave',
author='James Cleveland',
author_email='[email protected]',
packages=find_packages(),
include_package_data=True,
license="LICENSE.txt",
install_requires=requires,
dependency_links=dep_links,
)
|
Python
| 0 |
@@ -278,9 +278,9 @@
0.5.
-6
+7
%22,%0A
|
2672d4af2c480cfbd83418db2b660a335bdf4540
|
bump version on invenio packages
|
setup.py
|
setup.py
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Invenio Digital Library Framework."""
import os
from setuptools import find_packages, setup
readme = open('README.rst').read()
history = open('CHANGES.rst').read()
tests_require = [
'check-manifest>=0.35',
'coverage>=4.4.1',
'isort>=4.3',
'pydocstyle>=2.0.0',
'pytest-cov>=2.5.1',
'pytest-pep8>=1.0.6',
'pytest>=3.3.1',
'pytest-invenio>=1.0.0,<1.1.0',
]
db_version = '>=1.0.1,<1.1.0'
search_version = '>=1.0.0,<1.1.0'
extras_require = {
# Bundles
'base': [
'invenio-admin>=1.0.0,<1.1.0',
'invenio-assets>=1.0.0,<1.1.0',
'invenio-formatter>=1.0.0,<1.1.0',
'invenio-logging>=1.0.0,<1.1.0',
'invenio-mail>=1.0.0,<1.1.0',
'invenio-rest>=1.0.0,<1.1.0',
'invenio-theme>=1.0.0,<1.1.0',
],
'auth': [
'invenio-access>=1.0.0,<1.1.0',
'invenio-accounts>=1.0.0,<1.1.0',
'invenio-oauth2server>=1.0.0,<1.1.0',
'invenio-oauthclient>=1.0.0,<1.1.0',
'invenio-userprofiles>=1.0.0,<1.1.0',
],
'metadata': [
'invenio-indexer>=1.0.0,<1.1.0',
'invenio-jsonschemas>=1.0.0,<1.1.0',
'invenio-oaiserver>=1.0.0,<1.1.0',
'invenio-pidstore>=1.0.0,<1.1.0',
'invenio-records-rest>=1.0.0,<1.1.0',
'invenio-records-ui>=1.0.0,<1.1.0',
'invenio-records>=1.0.0,<1.1.0',
'invenio-search-ui>=1.0.0,<1.1.0',
],
# Database version
'postgresql': [
'invenio-db[postgresql,versioning]{}'.format(db_version),
],
'mysql': [
'invenio-db[mysql,versioning]{}'.format(db_version),
],
'sqlite': [
'invenio-db[versioning]{}'.format(db_version),
],
# Elasticsearch version
'elasticsearch2': [
'invenio-search[elasticsearch2]{}'.format(search_version),
],
'elasticsearch5': [
'invenio-search[elasticsearch5]{}'.format(search_version),
],
'elasticsearch6': [
'invenio-search[elasticsearch6]{}'.format(search_version),
],
# Docs and test dependencies
'docs': [
'Sphinx>=1.5.1',
],
'tests': tests_require,
}
extras_require['all'] = []
for name, reqs in extras_require.items():
if name in ('sqlite', 'mysql', 'postgresql') \
or name.startswith('elasticsearch'):
continue
extras_require['all'].extend(reqs)
setup_requires = [
'pytest-runner>=3.0.0,<5',
]
install_requires = [
'Flask>=0.11.1',
'invenio-app>=1.0.0,<1.1.0',
'invenio-base>=1.0.0,<1.1.0',
'invenio-celery>=1.0.0,<1.1.0',
'invenio-config>=1.0.0,<1.1.0',
'invenio-i18n>=1.0.0,<1.1.0',
]
packages = find_packages()
# Get the version string. Cannot be done with import!
g = {}
with open(os.path.join('invenio', 'version.py'), 'rt') as fp:
exec(fp.read(), g)
version = g['__version__']
setup(
name='invenio',
version=version,
description=__doc__,
long_description=readme + '\n\n' + history,
keywords='Invenio digital library framework',
license='MIT',
author='CERN',
author_email='[email protected]',
url='https://github.com/inveniosoftware/invenio',
packages=packages,
zip_safe=False,
include_package_data=True,
platforms='any',
entry_points={},
extras_require=extras_require,
install_requires=install_requires,
setup_requires=setup_requires,
tests_require=tests_require,
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Development Status :: 5 - Production/Stable',
],
)
|
Python
| 0 |
@@ -604,25 +604,25 @@
nvenio%3E=1.0.
-0
+2
,%3C1.1.0',%0A%5D%0A
@@ -920,25 +920,25 @@
o-mail%3E=1.0.
-0
+1
,%3C1.1.0',%0A
@@ -1058,25 +1058,25 @@
access%3E=1.0.
-0
+1
,%3C1.1.0',%0A
@@ -1096,33 +1096,33 @@
o-accounts%3E=1.0.
-0
+1
,%3C1.1.0',%0A
@@ -1142,33 +1142,33 @@
uth2server%3E=1.0.
-0
+1
,%3C1.1.0',%0A
@@ -1237,25 +1237,25 @@
ofiles%3E=1.0.
-0
+1
,%3C1.1.0',%0A
@@ -1477,32 +1477,32 @@
rds-rest%3E=1.
-0
+1
.0,%3C1.
-1
+2
.0',%0A
@@ -1519,33 +1519,33 @@
records-ui%3E=1.0.
-0
+1
,%3C1.1.0',%0A
@@ -1607,25 +1607,25 @@
rch-ui%3E=1.0.
-0
+1
,%3C1.1.0',%0A
@@ -2712,25 +2712,25 @@
o-base%3E=1.0.
-0
+1
,%3C1.1.0',%0A
|
9be0f7d7ce0a562fdda5fc065a8971ccdf3b7c2c
|
Bump version to 1.1.0
|
setup.py
|
setup.py
|
#!/usr/bin/env python
import os
import subprocess
import tarfile
import shutil
import sysconfig
import requests
from setuptools import setup
from setuptools.command.build_ext import build_ext
from setuptools.extension import Extension
def urlretrieve(source_url, destination_path):
response = requests.get(source_url, stream=True)
if response.status_code != 200:
raise Exception("status code was: {}".format(response.status_code))
with open(destination_path, "wb") as fileobj:
for chunk in response.iter_content(chunk_size=128):
fileobj.write(chunk)
def path_in_dir(relative_path):
return os.path.abspath(os.path.join(os.path.dirname(__file__), relative_path))
def dependency_path(relative_path):
return os.path.join(path_in_dir("_deps"), relative_path)
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
jq_lib_tarball_path = dependency_path("jq-lib-1.6.tar.gz")
jq_lib_dir = dependency_path("jq-1.6")
oniguruma_version = "6.9.4"
oniguruma_lib_tarball_path = dependency_path("onig-{}.tar.gz".format(oniguruma_version))
oniguruma_lib_build_dir = dependency_path("onig-{}".format(oniguruma_version))
oniguruma_lib_install_dir = dependency_path("onig-install-{}".format(oniguruma_version))
class jq_build_ext(build_ext):
def run(self):
if not os.path.exists(dependency_path(".")):
os.makedirs(dependency_path("."))
self._build_oniguruma()
self._build_libjq()
build_ext.run(self)
def _build_oniguruma(self):
self._build_lib(
source_url="https://github.com/kkos/oniguruma/releases/download/v{0}/onig-{0}.tar.gz".format(oniguruma_version),
tarball_path=oniguruma_lib_tarball_path,
lib_dir=oniguruma_lib_build_dir,
commands=[
["./configure", "CFLAGS=-fPIC", "--prefix=" + oniguruma_lib_install_dir],
["make"],
["make", "install"],
])
def _build_libjq(self):
self._build_lib(
source_url="https://github.com/stedolan/jq/releases/download/jq-1.6/jq-1.6.tar.gz",
tarball_path=jq_lib_tarball_path,
lib_dir=jq_lib_dir,
commands=[
["autoreconf", "-i"],
["./configure", "CFLAGS=-fPIC", "--disable-maintainer-mode", "--with-oniguruma=" + oniguruma_lib_install_dir],
["make"],
])
def _build_lib(self, source_url, tarball_path, lib_dir, commands):
self._download_tarball(
source_url=source_url,
tarball_path=tarball_path,
lib_dir=lib_dir,
)
macosx_deployment_target = sysconfig.get_config_var("MACOSX_DEPLOYMENT_TARGET")
if macosx_deployment_target:
os.environ['MACOSX_DEPLOYMENT_TARGET'] = macosx_deployment_target
def run_command(args):
print("Executing: %s" % ' '.join(args))
subprocess.check_call(args, cwd=lib_dir)
for command in commands:
run_command(command)
def _download_tarball(self, source_url, tarball_path, lib_dir):
if os.path.exists(tarball_path):
os.unlink(tarball_path)
print("Downloading {}".format(source_url))
urlretrieve(source_url, tarball_path)
print("Downloaded {}".format(source_url))
if os.path.exists(lib_dir):
shutil.rmtree(lib_dir)
tarfile.open(tarball_path, "r:gz").extractall(dependency_path("."))
jq_extension = Extension(
"jq",
sources=["jq.c"],
include_dirs=[os.path.join(jq_lib_dir, "src")],
extra_link_args=["-lm"],
extra_objects=[
os.path.join(jq_lib_dir, ".libs/libjq.a"),
os.path.join(oniguruma_lib_install_dir, "lib/libonig.a"),
],
)
setup(
name='jq',
version='1.0.2',
description='jq is a lightweight and flexible JSON processor.',
long_description=read("README.rst"),
author='Michael Williamson',
url='http://github.com/mwilliamson/jq.py',
python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
license='BSD 2-Clause',
ext_modules = [jq_extension],
cmdclass={"build_ext": jq_build_ext},
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
)
|
Python
| 0 |
@@ -3840,11 +3840,11 @@
='1.
-0.2
+1.0
',%0A
|
71fb2fc819c82e2db4075c6e5e32b2addc99c63a
|
Add platforms and classifiers
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
setup(name='gsmsapi',
version='0.10',
description='SMS API for (german) SMS providers',
author='Torge Szczepanek',
author_email='[email protected]',
maintainer='Torge Szczepanek',
maintainer_email='[email protected]',
license='MIT',
packages=['gsmsapi'],
url = 'https://github.com/CygnusNetworks/python-gsmsapi',
download_url = 'https://github.com/CygnusNetworks/python-gsmsapi/tarball/v0.10',
keywords = ["sms", "german", "sipgate", "smstrade", "api"],
)
|
Python
| 0.000001 |
@@ -549,10 +549,366 @@
%22api%22%5D,%0A
+%09platforms='any',%0A%09classifiers=%5B%0A%09%09'Development Status :: 4 - Beta',%0A%09%09'Intended Audience :: Developers',%0A%09%09'License :: OSI Approved :: MIT License',%0A%09%09'Operating System :: OS Independent',%0A%09%09'Programming Language :: Python',%0A%09%09'Topic :: Software Development :: Libraries :: Python Modules'%5D # see: https://pypi.python.org/pypi?%253Aaction=list_classifiers%0A
)%0A
|
8b8383680e73496a73a3a520c3ebc85e2e01ce01
|
fix version in setup.py
|
setup.py
|
setup.py
|
#!/usr/bin/env python
"""
Flask-REST4
-------------
Elegant RESTful API for your Flask apps.
"""
from setuptools import setup
setup(
name='flask_rest4',
version='0.1.0',
url='https://github.com/squirrelmajik/flask_rest4',
license='See License',
author='majik',
author_email='[email protected]',
description='Elegant RESTful API for your Flask apps.',
long_description=__doc__,
py_modules=['flask_rest4'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
|
Python
| 0 |
@@ -170,17 +170,17 @@
on='0.1.
-0
+3
',%0A u
|
b03b6faea0470d867749c7b3bc3d6edc9c2406b9
|
Remove pytest-Django
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from setuptools import setup
from setuptools.command.test import test as TestCommand
# Utility function to read file in the setup.py directory
def open_here(fname):
return open(os.path.join(os.path.dirname(__file__), fname))
def get_dependencies(env_yml_file):
"""
Read the dependencies from a Conda environment file in YAML
and return a list of such dependencies (from conda and pip list)
Be sure to match packages specification for each of:
- Conda : http://conda.pydata.org/docs/spec.html#build-version-spec
- Pip & Setuptool :
- http://pythonhosted.org/setuptools/setuptools.html?highlight=install_require#declaring-dependencies
- https://pythonhosted.org/setuptools/pkg_resources.html#requirement-objects
"""
import yaml
with open_here(env_yml_file) as f:
environment = yaml.load(f)
conda_dependencies = []
package_map = {
'pytables': 'tables', # insert 'tables' instead of 'pytables'
'yaafe': ''
}
for dep in environment['dependencies']:
if isinstance(dep, str) and not(dep.startswith('python')):
if dep in package_map:
conda_dependencies.append(package_map[dep])
else:
conda_dependencies.append(dep)
elif isinstance(dep, dict) and 'pip' in dep:
pip_dependencies = dep['pip']
return conda_dependencies + pip_dependencies
# Pytest
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['tests', '--ignore', 'tests/sandbox', '--verbose', '--ds=app.test_settings']
self.test_suite = True
def run_tests(self):
# import here, cause outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
CLASSIFIERS = [
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Intended Audience :: Information Technology',
'Programming Language :: Python',
'Programming Language :: JavaScript',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Multimedia :: Sound/Audio',
'Topic :: Multimedia :: Sound/Audio :: Analysis',
'Topic :: Multimedia :: Sound/Audio :: Players',
'Topic :: Multimedia :: Sound/Audio :: Conversion',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Software Development :: Libraries :: Python Modules',
]
KEYWORDS = 'audio analysis features extraction MIR transcoding graph visualize plot HTML5 interactive metadata player'
setup(
name='TimeSide',
url='https://github.com/Parisson/TimeSide/',
description="Audio processing framework for the web",
long_description=open_here('README.rst').read(),
author="Guillaume Pellerin, Paul Brossier, Thomas Fillon, Riccardo Zaccarelli, Olivier Guilyardi",
author_email="[email protected], [email protected], [email protected], [email protected], [email protected]",
version='0.8.1',
setup_requires=['pyyaml'],
install_requires=[get_dependencies('conda-environment.yml')],
platforms=['OS Independent'],
license='Gnu Public License V2',
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
packages=['timeside'],
include_package_data=True,
zip_safe=False,
scripts=['scripts/timeside-waveforms', 'scripts/timeside-launch'],
tests_require=['pytest>=3', 'pytest-django'],
cmdclass={'test': PyTest},
)
|
Python
| 0.000006 |
@@ -1671,34 +1671,8 @@
ose'
-, '--ds=app.test_settings'
%5D%0A
@@ -3468,25 +3468,8 @@
%3E=3'
-, 'pytest-django'
%5D,%0A
|
656d24c38c69891d8731ccf32852b66e32120eb7
|
Bump dependency
|
setup.py
|
setup.py
|
#!/usr/bin/env python
from setuptools import find_packages, setup
project = "microcosm_pubsub"
version = "0.26.1"
setup(
name=project,
version=version,
description="PubSub with SNS/SQS",
author="Globality Engineering",
author_email="[email protected]",
url="https://github.com/globality-corp/microcosm-pubsub",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
include_package_data=True,
zip_safe=False,
install_requires=[
"boto3>=1.3.0",
"marshmallow>=2.12.1",
"microcosm>=0.17.1",
"microcosm-daemon>=0.10.0",
"microcosm-logging>=0.12.0",
],
setup_requires=[
"nose>=1.3.6",
],
dependency_links=[
],
entry_points={
"microcosm.factories": [
"sqs_message_context = microcosm_pubsub.context:configure_sqs_message_context",
"pubsub_message_schema_registry = microcosm_pubsub.registry:configure_schema_registry",
"sqs_consumer = microcosm_pubsub.consumer:configure_sqs_consumer",
"sqs_envelope = microcosm_pubsub.envelope:configure_sqs_envelope",
"sqs_message_dispatcher = microcosm_pubsub.dispatcher:configure",
"sqs_message_handler_registry = microcosm_pubsub.registry:configure_handler_registry",
"sns_producer = microcosm_pubsub.producer:configure_sns_producer",
"sns_topic_arns = microcosm_pubsub.producer:configure_sns_topic_arns",
]
},
tests_require=[
"coverage>=3.7.1",
"mock>=1.0.1",
"PyHamcrest>=1.8.5",
],
)
|
Python
| 0.000001 |
@@ -574,17 +574,17 @@
m%3E=0.17.
-1
+2
%22,%0A
|
5e9fa7a1bb8601fb5629d7e7e92a894ab335ccf1
|
update readme extension
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read().replace('.. :changelog:', '')
requirements = [
"wheel>=0.23.0",
"requests>=2.7.0",
"pandas>=0.16.2",
"docopt>=0.6.2"
]
test_requirements = [
# TODO: put package test requirements here
]
setup(
name='labkey_multisite_query_tool',
version='0.1.0',
description="Commandline tool for querying across mutltiple LabKey instances.",
long_description=readme + '\n\n' + history,
author="Stefan Novak",
author_email='[email protected]',
url='https://github.com/OHSUCompBio/labkey_multisite_query_tool',
packages=[
'labkey_multisite_query_tool',
],
package_dir={'labkey_multisite_query_tool':
'labkey_multisite_query_tool'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='labkey_multisite_query_tool',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
scripts=['bin/labkey'],
test_suite='tests',
tests_require=test_requirements
)
|
Python
| 0 |
@@ -156,19 +156,18 @@
'README.
-rst
+md
') as re
@@ -214,115 +214,8 @@
()%0A%0A
-with open('HISTORY.rst') as history_file:%0A history = history_file.read().replace('.. :changelog:', '')%0A%0A
requ
@@ -571,27 +571,8 @@
adme
- + '%5Cn%5Cn' + history
,%0A
|
187dbc9feab320c720c2632c4140a62e2c384328
|
bump version
|
setup.py
|
setup.py
|
#!/usr/bin/env python
# Copyright 2016 IBM All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from setuptools import setup
from setuptools.command.test import test as TestCommand
import os
import sys
__version__ = '0.24.0'
if sys.argv[-1] == 'publish':
# test server
os.system('python setup.py register -r pypitest')
os.system('python setup.py sdist upload -r pypitest')
# production server
os.system('python setup.py register -r pypi')
os.system('python setup.py sdist upload -r pypi')
sys.exit()
# Convert README.md to README.rst for pypi
try:
from pypandoc import convert
def read_md(f):
return convert(f, 'rst')
# read_md = lambda f: convert(f, 'rst')
except ImportError:
print('warning: pypandoc module not found, '
'could not convert Markdown to RST')
def read_md(f):
return open(f, 'rb').read().decode(encoding='utf-8')
# read_md = lambda f: open(f, 'rb').read().decode(encoding='utf-8')
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['--strict', '--verbose', '--tb=long', 'test']
self.test_suite = True
def run_tests(self):
import pytest
errcode = pytest.main(self.test_args)
sys.exit(errcode)
setup(name='watson-developer-cloud',
version=__version__,
description='Client library to use the IBM Watson Services',
license='Apache 2.0',
install_requires=['requests>=2.0, <3.0', 'pysolr>= 3.3, <4.0'],
tests_require=['responses', 'pytest', 'python_dotenv'],
cmdclass={'test': PyTest},
author='Jeffrey Stylos',
author_email='[email protected]',
long_description=read_md('README.md'),
url='https://github.com/watson-developer-cloud/python-sdk',
packages=['watson_developer_cloud'],
include_package_data=True,
keywords='alchemy datanews, language, vision, question and answer' +
' tone_analyzer, natural language classifier, retrieve and '
'rank,' +
' tradeoff analytics, text to speech,' +
' language translation, language identification,' +
' concept expansion, machine translation, personality '
'insights,' +
' message resonance, watson developer cloud, wdc, watson, '
'ibm,' +
' dialog, user modeling, alchemyapi, alchemy, tone analyzer,' +
'speech to text, visual recognition',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Topic :: Software Development :: Libraries :: Python Modules',
'Topic :: Software Development :: Libraries :: Application '
'Frameworks',
],
zip_safe=True
)
|
Python
| 0 |
@@ -768,17 +768,17 @@
_ = '0.2
-4
+5
.0'%0A%0Aif
|
c95234c130435ddd116784ad1829f7bdaa9182c5
|
Add Problem 138 solution using A195615 (OEIS)
|
100_to_199/euler_138.py
|
100_to_199/euler_138.py
|
Python
| 0 |
@@ -0,0 +1,1478 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A'''%0AProblem 138%0AConsider the isosceles triangle with base length, b = 16, and legs, L = 17.%0A%0A%0ABy using the Pythagorean theorem it can be seen that the height of the triangle, h = %E2%88%9A(17%C2%B2 %E2%88%92 8%C2%B2) = 15, which is one less than the base length.%0AWith b = 272 and L = 305, we get h = 273, which is one more than the base length, and this is the second smallest isosceles triangle with the property that h = b %C2%B1 1.%0AFind %E2%88%91 L for the twelve smallest isosceles triangles for which h = b %C2%B1 1 and b, L are positive integers.%0A'''%0Afrom decimal import Decimal%0Afrom math import modf%0A%0A%0A# Pythagorean approximations%0A# http://oeis.org/A195615 (FORMULA)%0Adef a(n):%0A    if n == 0:%0A        return 15%0A    if n == 1:%0A        return 273%0A    if n == 2:%0A        return 4895%0A    return 17 * a(n - 1) + 17 * a(n - 2) - a(n - 3)%0A%0A%0Adef p138():%0A    highs = %5Ba(i) for i in range(0, 12)%5D%0A    result = %5B%5D%0A    for h in highs:%0A        hd = h ** 2%0A        bd = ((h - 1) // 2) ** 2%0A        ret = Decimal(hd + bd).sqrt()%0A        ret_float, ret_int = modf(ret)%0A        if ret_float == 0.0:%0A            # print('%5B-%5D', %5Bh%5D, ret, ret_float, ret_int)%0A            result.append(int(ret_int))%0A            continue%0A        bd = ((h + 1) // 2) ** 2%0A        ret = Decimal(hd + bd).sqrt()%0A        ret_float, ret_int = modf(ret)%0A        if ret_float == 0.0:%0A            # print('%5B+%5D', %5Bh%5D, ret, ret_float, ret_int)%0A            result.append(int(ret_int))%0A    print(sum(result))%0A%0A%0Ap138()%0A
|
|
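The base case can be checked by hand against the problem statement: for h = a(0) = 15 the base is b = 16 (so h = b - 1), and Pythagoras with half the base recovers the leg, L^2 = h^2 + (b/2)^2 = 225 + 64 = 289 = 17^2:

h, b = 15, 16
assert h**2 + (b // 2)**2 == 17**2  # 289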
e7b54968a67bda76546deff546baa49f836cfbaa
|
Add train_fcn32s
|
examples/voc/train_fcn32s.py
|
examples/voc/train_fcn32s.py
|
Python
| 0.000003 |
@@ -0,0 +1,1643 @@
+#!/usr/bin/env python%0A%0Aimport chainer%0Afrom chainer.training import extensions%0A%0Aimport fcn%0A%0A%0Adef main():%0A    gpu = 0%0A    resume = None  # filename%0A%0A    # 1. dataset%0A    dataset_train = fcn.datasets.PascalVOC2012SegmentationDataset('train')%0A    dataset_val = fcn.datasets.PascalVOC2012SegmentationDataset('val')%0A%0A    iter_train = chainer.iterators.SerialIterator(dataset_train, batch_size=1)%0A    iter_val = chainer.iterators.SerialIterator(dataset_val, batch_size=1)%0A%0A    # 2. model%0A    vgg_path = fcn.data.download_vgg16_chainermodel()%0A    vgg = fcn.models.VGG16()%0A    chainer.serializers.load_hdf5(vgg_path, vgg)%0A%0A    model = fcn.models.FCN32s()%0A    fcn.util.copy_chainermodel(vgg, model)%0A%0A    if gpu %3E= 0:%0A        chainer.cuda.get_device(gpu).use()%0A        model.to_gpu()%0A%0A    # 3. optimizer%0A    optimizer = chainer.optimizers.MomentumSGD(lr=1e-10, momentum=0.99)%0A    optimizer.setup(model)%0A%0A    # 4. trainer%0A    max_epoch = 10000%0A    updater = chainer.training.StandardUpdater(%0A        iter_train, optimizer, device=gpu)%0A    trainer = chainer.training.Trainer(%0A        updater, (max_epoch, 'epoch'), out='result')%0A%0A    trainer.extend(extensions.Evaluator(iter_val, model, device=gpu))%0A    trainer.extend(extensions.snapshot(), trigger=(max_epoch, 'epoch'))%0A    trainer.extend(extensions.LogReport())%0A    trainer.extend(extensions.PrintReport(%0A        %5B'epoch', 'main/loss', 'validation/main/loss',%0A         'main/accuracy', 'validation/main/accuracy', 'elapsed_time'%5D))%0A    trainer.extend(extensions.ProgressBar())%0A%0A    if resume:%0A        chainer.serializers.load_hdf5(resume, trainer)%0A%0A    trainer.run()%0A%0A%0Aif __name__ == '__main__':%0A    main()%0A
|
|
b01bd1b21f1b12c9120845ec8a85355b038d6b20
|
Add a basic Storage engine to talk to the DB
|
inventory_control/storage.py
|
inventory_control/storage.py
|
Python
| 0 |
@@ -0,0 +1,667 @@
+%22%22%22%0AThis is the Storage engine. It's how everything should talk to the database%0Alayer that sits on the inside of the inventory-control system.%0A%22%22%22%0A%0Aimport MySQLdb%0A%0A%0Aclass StorageEngine(object):%0A %22%22%22%0A Instantiate a DB access object, create all the necessary hooks and%0A then the accessors to a SQL database.%0A%0A %22%22%22%0A%0A def __init__(self, config):%0A self.config = config%0A self.db = MySQLdb.connect(host=self.config%5B'host'%5D,%0A user=self.config%5B'user'%5D,%0A passwd=self.config%5B'password'%5D,%0A db=self.config%5B'db'%5D)%0A self.cursor = self.db.cursor()%0A
|
|
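A sketch of how the engine would be constructed; the dictionary keys mirror exactly what __init__ reads, and the host and credentials below are placeholders, not real values:

config = {
    'host': 'localhost',        # placeholder
    'user': 'inventory',        # placeholder
    'password': 'secret',       # placeholder
    'db': 'inventory_control',  # placeholder
}
engine = StorageEngine(config)
engine.cursor.execute('SELECT VERSION()')
print(engine.cursor.fetchone())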
5397bbe4a87dba82dc9fa57abf09a4346aa63f46
|
Add 168 python solution (#38)
|
python/168_Excel_Sheet_Column_Title.py
|
python/168_Excel_Sheet_Column_Title.py
|
Python
| 0 |
@@ -0,0 +1,199 @@
+class Solution:%0A def convertToTitle(self, n: int) -%3E str:%0A res = %22%22%0A while n %3E 0:%0A n -= 1%0A res = chr(65 + n %25 26) + res%0A n //= 26%0A return res%0A
|
|
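The n -= 1 before each division is the key step: spreadsheet column titles are bijective base 26, where the digits A..Z map to 1..26 and there is no zero digit. A few spot checks:

s = Solution()
print(s.convertToTitle(1))    # A
print(s.convertToTitle(26))   # Z
print(s.convertToTitle(28))   # AB
print(s.convertToTitle(701))  # ZY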
399daa8ebec14bc4d7ee6c08135e525190e1eb6f
|
Add short Python script that prints as many dummy divs as needed.
|
collections/show-test/print-divs.py
|
collections/show-test/print-divs.py
|
Python
| 0 |
@@ -0,0 +1,130 @@
+# print-divs.py%0A%0Adef printDivs(num):%0A%09for i in range(num):%0A%09%09print('%3Cdiv class=%22item%22%3EItem ' + str(i+1) + '%3C/div%3E')%0A%0AprintDivs(20)
|
|
97883fa22dd8b1207cd533b4dd9e438c83a32a90
|
Update version.
|
mixer/__init__.py
|
mixer/__init__.py
|
"""
Description.
"""
# Module information
# ==================
__version__ = '0.1.0'
__project__ = 'mixer'
__author__ = "horneds <[email protected]>"
__license__ = "BSD"
|
Python
| 0 |
@@ -84,9 +84,9 @@
'0.
-1
+2
.0'%0A
@@ -169,8 +169,9 @@
= %22BSD%22
+%0A
|
2b80b358edd5bcf914d0c709369dbbcfd748772b
|
Add in a test for the marketing_link function in mitxmako
|
common/djangoapps/mitxmako/tests.py
|
common/djangoapps/mitxmako/tests.py
|
Python
| 0.000002 |
@@ -0,0 +1,1032 @@
+from django.test import TestCase%0Afrom django.test.utils import override_settings%0Afrom django.core.urlresolvers import reverse%0Afrom django.conf import settings%0Afrom mitxmako.shortcuts import marketing_link%0Afrom mock import patch%0A%0A%0Aclass ShortcutsTests(TestCase):%0A %22%22%22%0A Test the mitxmako shortcuts file%0A %22%22%22%0A%0A @override_settings(MKTG_URLS=%7B'ROOT': 'dummy-root', 'ABOUT': '/about-us'%7D)%0A @override_settings(MKTG_URL_LINK_MAP=%7B'ABOUT': 'about_edx'%7D)%0A def test_marketing_link(self):%0A # test marketing site on%0A with patch.dict('django.conf.settings.MITX_FEATURES', %7B'ENABLE_MKTG_SITE': True%7D):%0A expected_link = 'dummy-root/about-us'%0A link = marketing_link('ABOUT')%0A self.assertEquals(link, expected_link)%0A # test marketing site off%0A with patch.dict('django.conf.settings.MITX_FEATURES', %7B'ENABLE_MKTG_SITE': False%7D):%0A expected_link = reverse('about_edx')%0A link = marketing_link('ABOUT')%0A self.assertEquals(link, expected_link)%0A
|
|
7236d0358064968b9cbb0ab7f4ee9876dea02aaa
|
Add Python common functions
|
python/tcp_port_scan/tcp_port_scan.py
|
python/tcp_port_scan/tcp_port_scan.py
|
Python
| 0.000222 |
@@ -0,0 +1,2695 @@
+# -*- coding: utf-8 -*-%0A#!/usr/bin/python%0A##-------------------------------------------------------------------%0A## @copyright 2015 DennyZhang.com%0A## File : tcp_port_scan.py%0A## Author : DennyZhang.com %[email protected]%3E%0A## Description :%0A## --%0A## Created : %3C2016-01-15%3E%0A## Updated: Time-stamp: %3C2016-08-11 23:14:08%3E%0A##-------------------------------------------------------------------%0Aimport argparse%0Aimport subprocess%0Aimport os, sys%0A%0A################################################################################%0A# TODO: move to common library%0Adef strip_comments(string):%0A    # remove empty lines and comments (# ...) from string%0A    l = %5B%5D%0A    for line in string.split(%22%5Cn%22):%0A        line = line.strip()%0A        if line.startswith(%22#%22) or line == %22%22:%0A            continue%0A        l.append(line)%0A    return %22%5Cn%22.join(l)%0A%0Adef string_remove(string, opt_list):%0A    l = %5B%5D%0A    # remove entries from string%0A    for line in string.split(%22%5Cn%22):%0A        should_remove = False%0A        for item in opt_list:%0A            if item in line:%0A                should_remove = True%0A        if should_remove is False:%0A            l.append(line)%0A    return %22%5Cn%22.join(l)%0A%0A# TODO: common logging%0A################################################################################%0Anmap_command = %22sudo nmap -sS -PN %25s%22 # (%22-p T:XXX,XXX 192.168.0.16%22)%0A%0Aresult_dict = %7B%7D%0A%0Adef nmap_check(server_ip, ports):%0A    if ports == %22%22:%0A        nmap_opts = server_ip%0A    else:%0A        nmap_opts = %22-p %25s %25s%22 %25 (ports, server_ip)%0A%0A    command = nmap_command %25 (nmap_opts)%0A    print %22Run: %25s%22 %25 (command)%0A    nmap_output = subprocess.check_output(command, shell=True)%0A    return cleanup_nmap_output(nmap_output, server_ip)%0A%0Adef cleanup_nmap_output(nmap_output, server_ip):%0A    return nmap_output%0A%0Adef audit_open_ports(port_list, whitelist):%0A    return%0A%0A################################################################################%0Aif __name__=='__main__':%0A    # Sample:%0A    # python ./tcp_port_scan.py --server_list_file XXX --port_list_file XXXX --white_list_file XXX%0A    parser = argparse.ArgumentParser()%0A    parser.add_argument('--server_list_file', required=True,%0A                        help=%22ip list to scan%22, type=str)%0A    parser.add_argument('--port_list_file', required=True,%0A                        help=%22customized tcp ports to scan%22, type=str)%0A    parser.add_argument('--white_list_file', required=True,%0A                        help=%22safe ports to allow open%22, type=str)%0A    args = parser.parse_args()%0A    server_list_file = args.server_list_file%0A    port_list_file = args.port_list_file%0A    white_list_file = args.white_list_file%0A%0A    print nmap_check(%22104.131.129.100%22, %22%22)%0A## File : tcp_port_scan.py ends%0A
|
|
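The two helpers marked '# TODO: move to common library' are pure string filters and can be tried in isolation (the port list below is made up):

text = '# allowed ports\n22\n80\n8080\n'
print(strip_comments(text))                     # '22\n80\n8080'
print(string_remove('22\n80\n8080', ['8080']))  # '22\n80'

Note that string_remove matches substrings, so removing '80' would also drop the '8080' line.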
c61850de298a1f40dd84d95d758d3c3faed38160
|
Add safe_decode utility function
|
nose2/util.py
|
nose2/util.py
|
import os
import re
import sys
try:
from compiler.consts import CO_GENERATOR
except ImportError:
# IronPython doesn't have a complier module
CO_GENERATOR=0x20
try:
from inspect import isgeneratorfunction # new in 2.6
except ImportError:
import inspect
# backported from Python 2.6
def isgeneratorfunction(func):
return bool((inspect.isfunction(func) or inspect.ismethod(func)) and
func.func_code.co_flags & CO_GENERATOR)
IDENT_RE = re.compile(r'^[_a-zA-Z]\w*$r', re.UNICODE)
VALID_MODULE_RE = re.compile(r'[_a-zA-Z]\w*\.py$', re.UNICODE)
def ln(label, char='-', width=70):
"""Draw a divider, with label in the middle.
>>> ln('hello there')
'---------------------------- hello there -----------------------------'
Width and divider char may be specified. Defaults are 70 and '-'
respectively.
"""
label_len = len(label) + 2
chunk = (width - label_len) // 2
out = '%s %s %s' % (char * chunk, label, char * chunk)
pad = width - len(out)
if pad > 0:
out = out + (char * pad)
return out
def valid_module_name(path):
return VALID_MODULE_RE.search(path)
def name_from_path(path):
# back up to find module root
parts = []
path = os.path.normpath(path)
base = os.path.splitext(path)[0]
candidate, top = os.path.split(base)
parts.append(top)
while candidate:
if ispackage(candidate):
candidate, top = os.path.split(candidate)
parts.append(top)
else:
break
return '.'.join(reversed(parts))
def module_from_name(name):
__import__(name)
return sys.modules[name]
def ispackage(path):
"""
Is this path a package directory?
"""
if os.path.isdir(path):
# at least the end of the path must be a legal python identifier
# and __init__.py[co] must exist
end = os.path.basename(path)
if IDENT_RE.match(end):
for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
if os.path.isfile(os.path.join(path, init)):
return True
if sys.platform.startswith('java') and \
os.path.isfile(os.path.join(path, '__init__$py.class')):
return True
return False
|
Python
| 0.000233 |
@@ -473,16 +473,28 @@
RATOR)%0A%0A
+import six%0A%0A
%0AIDENT_R
@@ -2307,8 +2307,285 @@
n False%0A
+%0A%0Adef safe_decode(string):%0A if string is None:%0A return string%0A try:%0A return string.decode()%0A except UnicodeDecodeError:%0A pass%0A try:%0A return string.decode('utf-8')%0A except UnicodeDecodeError:%0A return six.u('%3Cunable to decode%3E')%0A
|
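The new safe_decode helper degrades gracefully; each of its paths can be demonstrated directly (the byte strings are illustrative):

print(safe_decode(None))            # None passes straight through
print(safe_decode(b'plain ascii'))  # default decode succeeds
print(safe_decode(b'caf\xc3\xa9'))  # falls back to the utf-8 decode
print(safe_decode(b'\xff\xfe'))     # undecodable -> u'<unable to decode>'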
a119c9f53babd87f5e5adc1886256c59a21c19a5
|
Move content_type formatting support to a different module
|
hug/format.py
|
hug/format.py
|
Python
| 0.000001 |
@@ -0,0 +1,223 @@
+def content_type(content_type):%0A    '''Attaches an explicit HTTP content type to a Hug formatting function'''%0A    def decorator(method):%0A        method.content_type = content_type%0A        return method%0A    return decorator%0A
|
|
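The decorator only attaches metadata; the wrapped function's behaviour is untouched, and the framework can read .content_type later when building the response. A minimal illustration with a made-up formatter, not Hug's real one:

import json

@content_type('application/json')
def as_json(data):
    return json.dumps(data)

print(as_json({'ok': True}))   # {"ok": true}
print(as_json.content_type)    # application/json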
ef803b8ac95bb2440d1d312584376149573ac798
|
Create bbgdailyhistory.py
|
BBG/bbgdailyhistory.py
|
BBG/bbgdailyhistory.py
|
Python
| 0.002245 |
@@ -0,0 +1,2933 @@
+# *- bbgdailyhistory.py -*%0A%0Aimport os%0Aimport numpy as np%0Aimport pandas as pd%0Aimport blpapi%0A%0A%0Aclass BBGDailyHistory:%0A    '''%0A    Parameters%0A    ----------%0A    sec : str%0A        Ticker%0A    fields : str or list%0A        Field or list of fields ('PX_HIGH', 'PX_LOW', etc...)%0A    start : str%0A        Start date%0A    end : str%0A        End date%0A    '''%0A    %0A    def __init__(self, sec, fields, start=None, end=None):%0A        #self.rqst = rqst%0A        self.sec = sec%0A        self.fields = fields%0A        self.start = start%0A        self.end = end%0A%0A    %0A    def get_data(self) -%3E pd.DataFrame:%0A        '''%0A        Returns%0A        -------%0A        data : pd.DataFrame()%0A            The historical data queried, returned in a DataFrame in%0A            long format%0A        '''%0A        # Session management%0A        sess = blpapi.Session()%0A        sess.start()%0A%0A        # Define data type%0A        sess.openService('//blp/refdata')%0A        service = sess.getService('//blp/refdata')%0A%0A        # Create request%0A        request = service.createRequest('HistoricalDataRequest')%0A%0A        # Optional request setters%0A        request.set('startDate', self.start)%0A        request.set('endDate', self.end)%0A        request.getElement('securities').appendValue(self.sec)%0A%0A        # Data holders %0A        date_acc = %5B%5D%0A        ticker_acc = %5B%5D%0A        field_acc = %5B%5D%0A        value_acc = %5B%5D%0A%0A        # Loop over fields%0A        for fie in self.fields:%0A            request.getElement('fields').appendValue(fie)%0A            sess.sendRequest(request)%0A            endReached = False%0A            while endReached == False:%0A                event = sess.nextEvent(500)%0A                if event.eventType() == blpapi.Event.RESPONSE or event.eventType() == blpapi.Event.PARTIAL_RESPONSE:%0A                    for msg in event:%0A                        fieldData = msg.getElement('securityData').getElement('fieldData')%0A                        for data in fieldData.values():%0A                            for fld in self.fields:%0A                                date_acc.append(data.getElement('date').getValue())%0A                                field_acc.append(fld)%0A                                value_acc.append(data.getElement(fld).getValue())%0A                                ticker_acc.append(self.sec)%0A                %0A                if event.eventType() == blpapi.Event.RESPONSE:%0A                    endReached = True%0A        %0A        sess.stop()%0A%0A        data = pd.DataFrame(%7B'timestamp' : date_acc,%0A                             'ticker' : ticker_acc,%0A                             'field' : field_acc,%0A                             'value' : value_acc%7D)%0A%0A        return data%0A%0A%0A%0Aif __name__ == %22__main__%22:%0A    # Usage example of BBGDailyHistory%0A    #from bbgdatapuller import BBGHistory # Expect folder issue%0A    security = 'SIMA SW Equity' #'SIMA SW Equity'%0A    fields = %5B'PX_OPEN', 'PX_HIGH', 'PX_LOW', 'PX_LAST'%5D%0A    start = '20200105'%0A    end = '20200109'%0A    d = BBGDailyHistory(sec=security, fields=fields, start=start, end=end).get_data()%0A    print(d.head())%0A
|
|
353868bc281ade826b48d2c5a79ad14986c0d35c
|
Create lowercaseLists.py
|
Bits/lowercaseLists.py
|
Bits/lowercaseLists.py
|
Python
| 0 |
@@ -0,0 +1,478 @@
+#!/usr/bin/env python%0A%0Adocs = %5B%22The Corporation%22, %22Valentino: The Last Emperor%22, %22Kings of Patsry%22%5D%0Amovies = %5B%22The Talented Mr. Ripley%22, %22The Network%22, %22Silence of the Lambs%22, %22Wall Street%22, %22Marie Antoinette%22, %22My Mana Godfrey%22, %22Rope%22, %22Sleuth%22%5D%0A%0Afilms = %5B%5Bdocs%5D, %5Bmovies%5D%5D%0Amovies%5B5%5D = %22My Man Godfrey%22%0Adocs%5B-1%5D = %22Kings of Pastry%22%0A%0A%0Ay = %5Bx.lower() for x in %5B%22A%22,%22B%22,%22C%22%5D%5D%0A%0Aprint(y)%0A%0AnewFilmsList = %5Bx.lower() for x in docs%5D + %5Bx.lower() for x in movies%5D %0Aprint(newFilmsList)%0A
|
|
9afd1a8d3584e45d32858c3b8fa44efd0f1a09f1
|
add unit test for ofproto automatic detection
|
ryu/tests/unit/ofproto/test_ofproto.py
|
ryu/tests/unit/ofproto/test_ofproto.py
|
Python
| 0 |
@@ -0,0 +1,2429 @@
+# Copyright (C) 2013 Nippon Telegraph and Telephone Corporation.%0A# Copyright (C) 2013 Isaku Yamahata %3Cyamahata at private email ne jp%3E%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or%0A# implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A%0Aimport unittest%0Aimport logging%0Afrom nose.tools import eq_%0A%0A%0ALOG = logging.getLogger('test_ofproto')%0A%0A%0Aclass TestOfprotCommon(unittest.TestCase):%0A %22%22%22 Test case for ofproto%0A %22%22%22%0A%0A def test_ofp_event(self):%0A import ryu.ofproto%0A reload(ryu.ofproto)%0A import ryu.controller.ofp_event%0A reload(ryu.controller.ofp_event)%0A%0A def test_ofproto(self):%0A # When new version of OFP support is added,%0A # this test must be updated.%0A import ryu.ofproto%0A reload(ryu.ofproto)%0A ofp_modules = ryu.ofproto.get_ofp_modules()%0A%0A import ryu.ofproto.ofproto_v1_0%0A import ryu.ofproto.ofproto_v1_2%0A import ryu.ofproto.ofproto_v1_3%0A eq_(set(ofp_modules.keys()), set(%5Bryu.ofproto.ofproto_v1_0.OFP_VERSION,%0A ryu.ofproto.ofproto_v1_2.OFP_VERSION,%0A ryu.ofproto.ofproto_v1_3.OFP_VERSION,%0A %5D))%0A consts_mods = set(%5Bofp_mod%5B0%5D for ofp_mod in ofp_modules.values()%5D)%0A eq_(consts_mods, set(%5Bryu.ofproto.ofproto_v1_0,%0A ryu.ofproto.ofproto_v1_2,%0A ryu.ofproto.ofproto_v1_3,%0A %5D))%0A%0A parser_mods = set(%5Bofp_mod%5B1%5D for ofp_mod in ofp_modules.values()%5D)%0A import ryu.ofproto.ofproto_v1_0_parser%0A import ryu.ofproto.ofproto_v1_2_parser%0A import ryu.ofproto.ofproto_v1_3_parser%0A eq_(parser_mods, set(%5Bryu.ofproto.ofproto_v1_0_parser,%0A ryu.ofproto.ofproto_v1_2_parser,%0A ryu.ofproto.ofproto_v1_3_parser,%0A %5D))%0A
|
|
81afc4ed6d7390567dfe9949c9f332b36a6add9c
|
Add lang install es_ES
|
l10n_cr_base/l10n_cr_base.py
|
l10n_cr_base/l10n_cr_base.py
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# l10n_cr_base.py
# l10n_cr_base
# First author: Carlos Vásquez <[email protected]> (ClearCorp S.A.)
# Copyright (c) 2010-TODAY ClearCorp S.A. (http://clearcorp.co.cr). All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification, are
# permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this list of
# conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this list
# of conditions and the following disclaimer in the documentation and/or other materials
# provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY <COPYRIGHT HOLDER> ``AS IS'' AND ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are those of the
# authors and should not be interpreted as representing official policies, either expressed
# or implied, of ClearCorp S.A..
#
##############################################################################
from osv import osv,fields
from base.res.partner.partner import _lang_get
class res_partner_function(osv.osv):
'''
Inherits res.partner.function to add translation to code and name fields
'''
_inherit = 'res.partner.function'
_columns = {
'name': fields.char('Function Name', size=64, required=True, translate=True),
'code': fields.char('Code', size=8, required=True, translate=True),
}
res_partner_function()
class res_partner_title(osv.osv):
'''
Inherits res.partner.title to add translation to shortcut field
'''
_inherit = 'res.partner.title'
_columns = {
'shortcut': fields.char('Shortcut', required=True, size=16, translate=True),
}
res_partner_title()
class res_partner(osv.osv):
'''
Inherits res.partner to add id_number field
'''
_inherit = 'res.partner'
_columns = {
'id_number': fields.char('ID Number', size=30,required=False, select=1),
'lang': fields.selection(_lang_get, 'Language', size=5, required=True, help="If the selected language is loaded in the system, all documents related to this partner will be printed in this language. If not, it will be english."),
}
_defaults = {
'lang': lambda *a: 'es_ES',
}
res_partner()
|
Python
| 0 |
@@ -3252,8 +3252,294 @@
rtner()%0A
+%0Adef _lang_es_install(self, cr, uid, data, context):%0A lang = 'es_ES'%0A modobj = pooler.get_pool(cr.dbname).get('ir.module.module')%0A mids = modobj.search(cr, uid, %5B('state', '=', 'installed')%5D)%0A modobj.update_translations(cr, uid, mids, lang)%0A return %7B%7D%0A_lang_es_install()%0A
|
e1810dcfd635198363838ed5c4dcd92c1cef1b07
|
use wikistats lib to update languages_by_size
|
scripts/maintenance/wikimedia_sites.py
|
scripts/maintenance/wikimedia_sites.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Script that updates the language lists in Wikimedia family files."""
#
# (C) xqt, 2009-2014
# (C) Pywikibot team, 2008-2014
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, unicode_literals
__version__ = '$Id$'
#
import codecs
import re
import requests
from xml.etree import cElementTree
import pywikibot
from pywikibot.family import Family
URL = 'https://wikistats.wmflabs.org/api.php?action=dump&table=%s&format=xml'
familiesDict = {
'anarchopedia': 'anarchopedias',
'wikibooks': 'wikibooks',
'wikinews': 'wikinews',
'wikipedia': 'wikipedias',
'wikiquote': 'wikiquotes',
'wikisource': 'wikisources',
'wikiversity': 'wikiversity',
'wikivoyage': 'wikivoyage',
'wiktionary': 'wiktionaries',
}
exceptions = ['-']
def update_family(families):
"""Update family files."""
for family in families or familiesDict.keys():
pywikibot.output('\nChecking family %s:' % family)
original = Family.load(family).languages_by_size
obsolete = Family.load(family).obsolete
feed = requests.get(URL % familiesDict[family])
tree = cElementTree.parse(feed)
new = []
for field in tree.findall('row/field'):
if field.get('name') == 'prefix':
code = field.text
if not (code in obsolete or code in exceptions):
new.append(code)
continue
# put the missing languages to the right place
missing = original != new and set(original) - set(new)
if missing:
pywikibot.warning("['%s'] not listed at wikistats."
% "', '".join(missing))
index = {}
for code in missing:
index[original.index(code)] = code
i = len(index) - 1
for key in sorted(index.keys(), reverse=True):
new.insert(key - i, index[key])
i -= 1
if original == new:
pywikibot.output(u'The lists match!')
else:
pywikibot.output(u"The lists don't match, the new list is:")
text = u' self.languages_by_size = [\r\n'
line = ' ' * 11
for code in new:
if len(line) + len(code) <= 76:
line += u" '%s'," % code
else:
text += u'%s\r\n' % line
line = ' ' * 11
line += u" '%s'," % code
text += u'%s\r\n' % line
text += u' ]'
pywikibot.output(text)
family_file_name = 'pywikibot/families/%s_family.py' % family
family_file = codecs.open(family_file_name, 'r', 'utf8')
family_text = family_file.read()
old = re.findall(r'(?msu)^ {8}self.languages_by_size.+?\]',
family_text)[0]
family_text = family_text.replace(old, text)
family_file = codecs.open(family_file_name, 'w', 'utf8')
family_file.write(family_text)
family_file.close()
if __name__ == '__main__':
fam = []
for arg in pywikibot.handleArgs():
if arg in familiesDict.keys() and arg not in fam:
fam.append(arg)
update_family(fam)
|
Python
| 0 |
@@ -129,17 +129,17 @@
2009-201
-4
+6
%0A# (C) P
@@ -165,9 +165,9 @@
-201
-4
+6
%0A#%0A#
@@ -335,71 +335,55 @@
ort
-requests%0A%0Afrom xml.etree import cElementTree%0A%0A
+pywikibot%0A%0Afrom pywikibot.data
import
-py
wiki
-bot%0A
+stats
%0Afro
@@ -420,86 +420,43 @@
ly%0A%0A
-URL = 'https://wikistats.wmflabs.org/api.php?action=dump&table=%25s&format=xml'%0A
+# supported families by this script
%0Afam
@@ -464,16 +464,17 @@
lies
-Dic
+_lis
t =
-%7B
+%5B
%0A
@@ -492,25 +492,8 @@
dia'
-: 'anarchopedias'
,%0A
@@ -509,123 +509,57 @@
oks'
-: 'wikibooks',%0A 'wikinews': 'wikinews',%0A 'wikipedia': 'wikipedias',%0A 'wikiquote': 'wikiquotes
+,%0A 'wikinews',%0A 'wikipedia',%0A 'wikiquote
',%0A
@@ -577,25 +577,8 @@
rce'
-: 'wikisources'
,%0A
@@ -596,43 +596,11 @@
ity'
-: 'wikiversity',%0A 'wikivoyage':
+,%0A
'
@@ -632,29 +632,11 @@
ary'
-: 'wiktionaries'
,%0A
-%7D
+%5D
%0A%0Aex
@@ -714,16 +714,47 @@
les.%22%22%22%0A
+ ws = wikistats.WikiStats()%0A
for
@@ -783,27 +783,21 @@
families
-Dict.keys()
+_list
:%0A
@@ -968,55 +968,16 @@
-feed = requests.get(URL %25 familiesDict%5Bfamily%5D)
+new = %5B%5D
%0A
@@ -986,188 +986,71 @@
t
-re
+abl
e =
-cElementTree.parse(feed)%0A%0A new = %5B%5D%0A for field in tree.findall('row/field'):%0A if field.get('name') == 'prefix':%0A code = field.text%0A
+ws.languages_by_size(family)%0A for code in table:%0A
@@ -1057,17 +1057,16 @@
-
if not (
@@ -1118,28 +1118,24 @@
-
new.append(c
@@ -1142,33 +1142,8 @@
ode)
-%0A continue
%0A%0A
@@ -2839,18 +2839,21 @@
fam =
-%5B%5D
+set()
%0A for
@@ -2880,9 +2880,10 @@
ndle
-A
+_a
rgs(
@@ -2915,38 +2915,13 @@
lies
-Dict.keys() and arg not in fam
+_list
:%0A
@@ -2935,20 +2935,17 @@
fam.a
-ppen
+d
d(arg)%0A
|
657591afce265521078a7cb2f84347c2319b6b33
|
Add tests to help with autograding
|
nbgrader/tests.py
|
nbgrader/tests.py
|
Python
| 0 |
@@ -0,0 +1,445 @@
+import nose.tools%0Aimport numpy as np%0A%0A%0Adef assert_unequal(a, b, msg=%22%22):%0A if a == b:%0A raise AssertionError(msg)%0A%0A%0Adef assert_same_shape(a, b):%0A a_ = np.array(a, copy=False)%0A b_ = np.array(b, copy=False)%0A assert a_.shape == b_.shape, %22%7B%7D != %7B%7D%22.format(a_.shape, b_.shape)%0A%0A%0Adef assert_allclose(a, b):%0A assert np.allclose(a, b), %22%7B%7D != %7B%7D%22.format(a, b)%0A%0Aassert_equal = nose.tools.eq_%0Aassert_raises = nose.tools.assert_raises%0A
|
|
5e7746d054f7762d93e1f70296fa3b43f882553c
|
Add synthtool scripts (#3765)
|
java-bigquerydatatransfer/google-cloud-bigquerydatatransfer/synth.py
|
java-bigquerydatatransfer/google-cloud-bigquerydatatransfer/synth.py
|
Python
| 0.000001 |
@@ -0,0 +1,1357 @@
+# Copyright 2018 Google LLC%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%22%22%22This script is used to synthesize generated parts of this library.%22%22%22%0A%0Aimport synthtool as s%0Aimport synthtool.gcp as gcp%0A%0Agapic = gcp.GAPICGenerator()%0Acommon_templates = gcp.CommonTemplates()%0A%0Alibrary = gapic.java_library(%0A service='bigquerydatatransfer',%0A version='v1',%0A config_path='/google/cloud/bigquery/datatransfer/artman_bigquerydatatransfer.yaml',%0A artman_output_name='google-cloud-bigquerydatatransfer-v1')%0A%0As.copy(library / 'gapic-google-cloud-bigquerydatatransfer-v1/src', 'src')%0As.copy(library / 'grpc-google-cloud-bigquerydatatransfer-v1/src', '../../google-api-grpc/grpc-google-cloud-bigquerydatatransfer-v1/src')%0As.copy(library / 'proto-google-cloud-bigquerydatatransfer-v1/src', '../../google-api-grpc/proto-google-cloud-bigquerydatatransfer-v1/src')%0A
|
|
c13ec330194612832dfb0953d3e561a0ac151d69
|
add irrigation baseline file gen scripts
|
scripts/RT/create_irrigation_files.py
|
scripts/RT/create_irrigation_files.py
|
Python
| 0 |
@@ -0,0 +1,814 @@
+%22%22%22Create the generalized irrigation files, for now.%0A%0Ahttps://www.ars.usda.gov/ARSUserFiles/50201000/WEPP/usersum.pdf page 60%0A%22%22%22%0Afrom datetime import date%0A%0ALASTYEAR = date.today().year%0A%0A%0Adef main():%0A %22%22%22Create files.%22%22%22%0A for ofecnt in range(1, 7): # Do we have more than 6 OFEs?%0A fn = f%22/i/0/irrigation/ofe%7Bofecnt%7D.txt%22%0A with open(fn, %22w%22, encoding=%22utf-8%22) as fh:%0A fh.write(%2295.7%5Cn%22) # datver%0A fh.write(f%22%7Bofecnt%7D 2 1%5Cn%22) # furrow depletion%0A fh.write(%220.013 0.025%5Cn%22) # mindepth maxdepth%0A for year in range(2007, LASTYEAR + 1):%0A for ofe in range(1, ofecnt):%0A fh.write(%0A f%22%7Bofe%7D 0.176E-05 1.3 0.5 1.0 175 %7Byear%7D 185 %7Byear%7D%5Cn%22%0A )%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
1b9aa5ccd500e17aa32c315e212068c8be96216c
|
Add profiler, now not import. thanks @tweekmoster!
|
rplugin/python3/deoplete/sources/deoplete_go/profiler.py
|
rplugin/python3/deoplete/sources/deoplete_go/profiler.py
|
Python
| 0 |
@@ -0,0 +1,1369 @@
+import functools%0Aimport queue%0A%0Atry:%0A import statistics%0A stdev = statistics.stdev%0A mean = statistics.mean%0Aexcept ImportError:%0A stdev = None%0A%0A def mean(l):%0A return sum(l) / len(l)%0A%0Atry:%0A import time%0A clock = time.perf_counter%0Aexcept Exception:%0A import timeit%0A clock = timeit.default_timer%0A%0A%0Aclass tfloat(float):%0A color = 39%0A%0A def __str__(self):%0A n = self * 1000%0A return '%5Cx1b%5B%25dm%25f%5Cx1b%5Bmms' %25 (self.color, n)%0A%0A%0Adef profile(func):%0A name = func.__name__%0A samples = queue.deque(maxlen=5)%0A%0A @functools.wraps(func)%0A def wrapper(self, *args, **kwargs):%0A if not self.debug_enabled:%0A return func(self, *args, **kwargs)%0A start = clock()%0A ret = func(self, *args, **kwargs)%0A n = tfloat(clock() - start)%0A%0A if len(samples) %3C 2:%0A m = 0%0A d = 0%0A n.color = 36%0A else:%0A m = mean(samples)%0A if stdev:%0A d = tfloat(stdev(samples))%0A else:%0A d = 0%0A%0A if n %3C= m + d:%0A n.color = 32%0A elif n %3E m + d * 2:%0A n.color = 31%0A else:%0A n.color = 33%0A samples.append(n)%0A self.info('%5Cx1b%5B34m%25s%5Cx1b%5Bm t = %25s, %5Cu00b5 = %25s, %5Cu03c3 = %25s)',%0A name, n, m, d)%0A return ret%0A return wrapper%0A
|
|
e9f2e966361d8a23c83fbbbb4a4b3d4046203a16
|
Test script for the heart container
|
CERR_core/Contouring/models/heart/test/test.py
|
CERR_core/Contouring/models/heart/test/test.py
|
Python
| 0 |
@@ -0,0 +1,612 @@
+#Test script for heart container testing if all the imports are successful%0A%0Aimport sys%0Aimport os%0Aimport numpy as np%0Aimport h5py%0Aimport fnmatch%0Afrom modeling.sync_batchnorm.replicate import patch_replication_callback%0Afrom modeling.deeplab import *%0Afrom torchvision.utils import make_grid%0Afrom dataloaders.utils import decode_seg_map_sequence%0Afrom skimage.transform import resize%0Afrom dataloaders import custom_transforms as tr%0Afrom PIL import Image%0Afrom torchvision import transforms%0A%0Ainput_size = 512%0A%0A%0Adef main(argv):%0A print(%22All imports done. Test Successful%22)%0A%0Aif __name__ == %22__main__%22:%0A main(sys.argv)
|
|
b223c8be2bcb11d529a07997c05a9c5ab2b183b2
|
Add basic tests for run length encoding printable
|
csunplugged/tests/resources/generators/test_run_length_encoding.py
|
csunplugged/tests/resources/generators/test_run_length_encoding.py
|
Python
| 0 |
@@ -0,0 +1,3175 @@
+from unittest import mock%0Afrom django.http import QueryDict%0Afrom django.test import tag%0Afrom resources.generators.RunLengthEncodingResourceGenerator import RunLengthEncodingResourceGenerator%0Afrom tests.resources.generators.utils import BaseGeneratorTest%0A%0A%0A@tag(%22resource%22)%0Aclass RunLengthEncodingResourceGeneratorTest(BaseGeneratorTest):%0A%0A def __init__(self, *args, **kwargs):%0A super().__init__(*args, **kwargs)%0A self.language = %22en%22%0A%0A def test_worksheet_version_values(self):%0A query = QueryDict(%22worksheet_type=student-basic&paper_size=a4%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.run_parameter_smoke_tests(generator, %22worksheet_type%22)%0A%0A def test_subtitle_student_basic_a4(self):%0A query = QueryDict(%22worksheet_type=student-basic&paper_size=a4%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.assertEqual(%0A generator.subtitle,%0A %22Student Worksheet - Kid Fax - a4%22%0A )%0A%0A def test_subtitle_student_basic_letter(self):%0A query = QueryDict(%22worksheet_type=student-basic&paper_size=letter%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.assertEqual(%0A generator.subtitle,%0A %22Student Worksheet - Kid Fax - letter%22%0A )%0A%0A def test_subtitle_student_create_a4(self):%0A query = QueryDict(%22worksheet_type=student-create&paper_size=a4%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.assertEqual(%0A generator.subtitle,%0A %22Student Worksheet - Create your own - a4%22%0A )%0A%0A def test_subtitle_student_create_letter(self):%0A query = QueryDict(%22worksheet_type=student-create&paper_size=letter%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.assertEqual(%0A generator.subtitle,%0A %22Student Worksheet - Create your own - letter%22%0A )%0A%0A def test_subtitle_student_create_colour_a4(self):%0A query = QueryDict(%22worksheet_type=student-create-colour&paper_size=a4%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.assertEqual(%0A generator.subtitle,%0A %22Student Worksheet - Create your own in colour - a4%22%0A )%0A%0A def test_subtitle_student_create_colour_letter(self):%0A query = QueryDict(%22worksheet_type=student-create-colour&paper_size=letter%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.assertEqual(%0A generator.subtitle,%0A %22Student Worksheet - Create your own in colour - letter%22%0A )%0A%0A def test_subtitle_student_teacher_a4(self):%0A query = QueryDict(%22worksheet_type=teacher&paper_size=a4%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.assertEqual(%0A generator.subtitle,%0A %22Teacher Worksheet - a4%22%0A )%0A%0A def test_subtitle_student_teacher_letter(self):%0A query = QueryDict(%22worksheet_type=teacher&paper_size=letter%22)%0A generator = RunLengthEncodingResourceGenerator(query)%0A self.assertEqual(%0A generator.subtitle,%0A %22Teacher Worksheet - letter%22%0A )%0A
|
|
24c3166906c8431523c641721e635fdc28fd91ce
|
add server that tests if a cookie was set
|
cookiescheck-test-server.py
|
cookiescheck-test-server.py
|
Python
| 0.000001 |
@@ -0,0 +1,643 @@
+import sys%0A%0Afrom flask import Flask, request, send_from_directory, make_response, abort%0Aapp = Flask(__name__)%0A%0Afilepath = None%0Amainpath = None%0A%[email protected]('/%3Cpath:path%3E')%0Adef get(path):%0A ret = make_response(send_from_directory(filepath, path))%0A%0A if path == mainpath:%0A ret.set_cookie('auth', '1')%0A elif request.cookies.get('auth') == '1':%0A pass%0A else:%0A abort(403)%0A%0A return ret%0A%0Aif __name__ == %22__main__%22:%0A if len(sys.argv) != 3:%0A print %22Usage: %25s %3Cdir-to-serve%3E %3Cmain-file%3E%22%0A sys.exit(1)%0A%0A print sys.argv%0A filepath = sys.argv%5B1%5D%0A mainpath = sys.argv%5B2%5D%0A app.run(host='0.0.0.0')%0A%0A
|
|
c011154135a73db2c5bba247fc33f94032553f2e
|
Correct package files
|
janitor/__init__.py
|
janitor/__init__.py
|
Python
| 0.000082 |
@@ -0,0 +1,93 @@
+import utils%0A%0Autils = utils%0Alogger, logger_api = utils.logger.setup_loggers(%0A %22janitor%22%0A)%0A
|
|
b6aedc1589c754bb867381e309aba5ae19f7bb1a
|
Create GDAL_SaveRaster.py
|
GDAL_SaveRaster.py
|
GDAL_SaveRaster.py
|
Python
| 0 |
@@ -0,0 +1,1331 @@
+%0Afrom osgeo import gdal%0A%0A%0A%0Adef save_raster ( output_name, raster_data, dataset, driver=%22GTiff%22 ):%0A %22%22%22%0A A function to save a 1-band raster using GDAL to the file indicated%0A by %60%60output_name%60%60. It requires a GDAL-accesible dataset to collect%0A the projection and geotransform.%0A %22%22%22%0A%0A # Open the reference dataset%0A g_input = gdal.Open ( dataset )%0A # Get the Geotransform vector%0A geo_transform = g_input.GetGeoTransform ()%0A x_size = g_input.RasterXSize # Raster xsize%0A y_size = g_input.RasterYSize # Raster ysize%0A srs = g_input.GetProjectionRef () # Projection%0A # Need a driver object. By default, we use GeoTIFF%0A if driver == %22GTiff%22:%0A driver = gdal.GetDriverByName ( driver )%0A dataset_out = driver.Create ( output_name, x_size, y_size, 1, %5C%0A gdal.GDT_Float32, %5B'TFW=YES', %5C%0A 'COMPRESS=LZW', 'TILED=YES'%5D )%0A else:%0A driver = gdal.GetDriverByName ( driver )%0A dataset_out = driver.Create ( output_name, x_size, y_size, 1, %5C%0A gdal.GDT_Float32 )%0A%0A dataset_out.SetGeoTransform ( geo_transform )%0A dataset_out.SetProjection ( srs )%0A dataset_out.GetRasterBand ( 1 ).WriteArray ( %5C%0A raster_data.astype(np.float32) )%0A dataset_out.GetRasterBand ( 1 ).SetNoDataValue ( float(-999) )%0A dataset_out = None%0A
|
|
542731f7fb3f5d09c4de4340f7ce18b7cbf41172
|
Create Client.py
|
Client.py
|
Client.py
|
Python
| 0.000001 |
@@ -0,0 +1,206 @@
+from Networking import Client%0A%0Aclient = Client()%0Aclient.connect('10.42.42.25', 12345).send(%7B'Ordre':'Timelapse', 'Action':%5B%22/home/pi/photo3%22, 24, 30, 0.25, False%5D%7D)%0Areponse = client.recv()%0Aclient.close()%0A%0A%0A
|
|
745adf9898e6dc80d37f1a0c3c4361acf76f2feb
|
Create main.py
|
main.py
|
main.py
|
Python
| 0.000001 |
@@ -0,0 +1,1471 @@
+import webapp2%0Aimport logging%0Aimport json%0Aimport utils%0Aimport re%0Aimport advanced%0A%0Aclass show_search_results(utils.BaseHandler):%0A def post(self):%0A #get info about slack post%0A token = self.request.get('token')%0A channel = self.request.get('channel')%0A text = self.request.get('text')%0A user = self.request.get('user_name')%0A user_id = self.request.get('user_id')%0A%0A #verify that the call to app is being made by an authorized slack slash command%0A if token == 'your_token':%0A %0A #extract the search term from the command and build the resulting search link%0A query_name = re.match(%22%5B%5E%5Cs%5D+%22, text)%0A%0A if query_name is not None:%0A query_name = image_name.group(0)%0A query_link = %22%3Chttps://google.com/q?=%7B%7D%22.format(query_name)%0A self.response.out.write(%22%22.format(query_link))%0A%0A #call the Slack incoming webhook%0A url = %22your_incoming_webhooks_url%22%0A payload = json.dumps(%0A %7B%22channel%22:channel, %22username%22:%22Highfive%22, %22text%22:%22%22.format(query_link)%7D)%0A result = urlfetch.fetch(url=url, %0A method=urlfetch.POST,%0A payload=payload)%0A%0Aapp = webapp2.WSGIApplication(%5B%0A ('/slack-five', query_link, debug=True)%5D) %0A
|
|
42753fc71b6a7cbe8697ba0eb053fdbc39c852a1
|
add test_eval
|
misc/test_eval.py
|
misc/test_eval.py
|
Python
| 0 |
@@ -0,0 +1,532 @@
+%0A# eval%0Adef main():%0A dictString = %22%7B'Define1':%5B%5B63.3,0.00,0.5,0.3,0.0%5D,%5B269.3,0.034,1.0,1.0,0.5%5D,%22 %5C%0A %22%5B332.2,0.933,0.2,0.99920654296875,1%5D,%5B935.0,0.990,0.2,0.1,1.0%5D%5D,%22 %5C%0A %22'Define2':%5B%5B63.3,0.00,0.5,0.2,1.0%5D,%5B269.3,0.034,1.0,0.3,0.5%5D,%22 %5C%0A %22%5B332.2,0.933,0.2, 0.4,0.6%5D,%5B935.0,0.990,1.0, 0.5,0.0%5D%5D,%7D%22%0A dict = eval(dictString)%0A%0A print(%22keys: %22, dict.keys())%0A print(%22Define1 value %22, dict%5B'Define1'%5D)%0A%0A# execfile%0Aexecfile(%22test_list.py%22)%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
a9da84352d6ff8b26a8e25ac9d15d5737c84225f
|
Add problem 12
|
problem_12.py
|
problem_12.py
|
Python
| 0.000203 |
@@ -0,0 +1,2826 @@
+from crypto_library import ecb_aes%0Afrom problem_11 import distinguish_encryption_mode%0Afrom string import printable%0A'''%0Afrom crypto_library import BLOCKSIZE%0Aimport random%0A%0AENCRYPTION_KEY = ''.join(random.choice(printable) for _ in range(BLOCKSIZE))%0A'''%0A%0A%0Adef new_encryption_oracle(adversary_input):%0A ENCRYPTION_KEY = ',y!3%3CCWn@1?wwF%5D%5Cx0b'%0A unknown_input = 'Um9sbGluJyBpbiBteSA1LjAKV2l0aCBteSByYWctdG9wIGRvd24gc28gbXkgaGFpciBjYW4gYmxvdwpUaGUgZ2lybGllcyBvbiBzdGFuZGJ5IHdhdmluZyBqdXN0IHRvIHNheSBoaQpEaWQgeW91IHN0b3A/IE5vLCBJIGp1c3QgZHJvdmUgYnkK'%0A return ecb_aes(adversary_input+unknown_input, ENCRYPTION_KEY)%0A%0A%0Adef find_blocksize():%0A adversary_input = ''%0A previous_length = len(new_encryption_oracle(adversary_input))%0A found_block_change = False%0A while True:%0A adversary_input += '0'%0A current_length = len(new_encryption_oracle(adversary_input))%0A if current_length %3E previous_length:%0A if found_block_change:%0A return current_length - previous_length%0A found_block_change = True%0A previous_length = current_length%0A%0A%0Adef find_unknown_text_length(blocksize):%0A adversary_input = ''%0A previous_length = len(new_encryption_oracle(adversary_input))%0A while True:%0A adversary_input += '0'%0A current_length = len(new_encryption_oracle(adversary_input))%0A if current_length %3E previous_length:%0A return current_length - len(adversary_input) - blocksize%0A%0A%0Adef find_single_ecb_character(blocksize, decrypted, unknown_text_length):%0A input_padding = '0'*(blocksize*(unknown_text_length/blocksize + 1) - len(decrypted) - 1)%0A test_padding = input_padding + decrypted%0A block_position = len(test_padding)/blocksize%0A%0A for test_char in printable:%0A test_character = test_padding + test_char%0A%0A test_character_ciphertext = new_encryption_oracle(test_character)%0A test_blocks = %5Btest_character_ciphertext%5Bi*blocksize:(i+1)*blocksize%5D for i in range(len(test_character_ciphertext)/blocksize)%5D%0A%0A ciphertext = new_encryption_oracle(input_padding)%0A cipher_blocks = %5Bciphertext%5Bi*blocksize:(i+1)*blocksize%5D for i in range(len(ciphertext)/blocksize)%5D%0A%0A if test_blocks%5Bblock_position%5D == cipher_blocks%5Bblock_position%5D:%0A return test_char%0A%0A%0Aif __name__ == '__main__':%0A blocksize = find_blocksize()%0A unknown_text_length = find_unknown_text_length(blocksize)%0A%0A chosen_input = '0'*(3*blocksize)%0A detection_ciphertext = new_encryption_oracle(chosen_input)%0A encryption_mode = distinguish_encryption_mode(detection_ciphertext)%0A if encryption_mode == 'ecb':%0A decrypted = ''%0A while len(decrypted) %3C unknown_text_length:%0A decrypted += find_single_ecb_character(blocksize, decrypted, unknown_text_length)%0A print decrypted.decode('base64')%0A
|
|
0ecc153d3946258f7daddd48bfc2870cb497b5db
|
Add IPlugSession interface
|
pyramid_pluggable_session/interfaces.py
|
pyramid_pluggable_session/interfaces.py
|
Python
| 0 |
@@ -0,0 +1,861 @@
+from zope.interface import Interface%0A%0Aclass IPlugSession(Interface):%0A %22%22%22 In interface that describes a pluggable session%0A %22%22%22%0A%0A def loads(session, request):%0A %22%22%22 This function given a %60%60session%60%60 and %60%60request%60%60 should using the%0A %60%60session_id%60%60 attribute of the %60%60session%60%60%0A%0A This function should return either the opaque session information or None.%0A %22%22%22%0A%0A def dumps(session, request, session_data):%0A %22%22%22 This function given a %60%60session%60%60 and %60%60request%60%60 should using the%0A %60%60_session_id%60%60 attribute of the %60%60session%60%60 write the session%0A information, with the %60%60session_id%60%60 being used as a unique identifier,%0A any previously stored session data should overwritten. %60%60session_data%60%60%0A is an opaque object, it's contents are a serialised version of the%0A session data.%0A %22%22%22%0A
|
|
1447a057095fb5b9f3845451e434d81d79e4e795
|
version 0.2.0
|
fb/__init__.py
|
fb/__init__.py
|
Python
| 0.000003 |
@@ -0,0 +1,2 @@
+ %0A
|
|
a0d8eff20cfd8b60be005e31692af74837ca16f5
|
test math.ceil() function
|
pythonPractiseSamples/mathExcercises.py
|
pythonPractiseSamples/mathExcercises.py
|
Python
| 0.000099 |
@@ -0,0 +1,414 @@
+#! /usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A# vim:fenc=utf-8%0A#%0A# Copyright %C2%A9 2016 Damian Ziobro %[email protected]%3E%0A%0Aimport unittest%0Aimport math%0A%0Aclass TestMathMethods(unittest.TestCase):%0A%0A def setUp(self):%0A self.number = 3.5%0A%0A def test_ceil(self):%0A self.assertEqual(math.ceil(self.number), 4);%0A%0Aif __name__ == %22__main__%22: %0A print (%22running unittests for math%22)%0A unittest.main();%0A
|
|
44130357b98001790547d53b7e1080e79842a058
|
add group recorded
|
test_add_group.py
|
test_add_group.py
|
Python
| 0 |
@@ -0,0 +1,1871 @@
+# -*- coding: utf-8 -*-%0Afrom selenium.webdriver.firefox.webdriver import WebDriver%0Afrom selenium.webdriver.common.action_chains import ActionChains%0Aimport time, unittest%0A%0Adef is_alert_present(wd):%0A try:%0A wd.switch_to_alert().text%0A return True%0A except:%0A return False%0A%0Aclass test_add_group(unittest.TestCase):%0A def setUp(self):%0A self.wd = WebDriver()%0A self.wd.implicitly_wait(60)%0A %0A def test_test_add_group(self):%0A success = True%0A wd = self.wd%0A wd.get(%22http://localhost/addressbook/group.php%22)%0A wd.find_element_by_name(%22user%22).click()%0A wd.find_element_by_name(%22user%22).clear()%0A wd.find_element_by_name(%22user%22).send_keys(%22admin%22)%0A wd.find_element_by_id(%22LoginForm%22).click()%0A wd.find_element_by_name(%22pass%22).click()%0A wd.find_element_by_name(%22pass%22).clear()%0A wd.find_element_by_name(%22pass%22).send_keys(%22secret%22)%0A wd.find_element_by_xpath(%22//form%5B@id='LoginForm'%5D/input%5B3%5D%22).click()%0A wd.find_element_by_name(%22new%22).click()%0A wd.find_element_by_name(%22group_name%22).click()%0A wd.find_element_by_name(%22group_name%22).clear()%0A wd.find_element_by_name(%22group_name%22).send_keys(%22new%22)%0A wd.find_element_by_name(%22group_header%22).click()%0A wd.find_element_by_name(%22group_header%22).clear()%0A wd.find_element_by_name(%22group_header%22).send_keys(%22new%22)%0A wd.find_element_by_name(%22group_footer%22).click()%0A wd.find_element_by_name(%22group_footer%22).clear()%0A wd.find_element_by_name(%22group_footer%22).send_keys(%22new%22)%0A wd.find_element_by_name(%22submit%22).click()%0A wd.find_element_by_link_text(%22group page%22).click()%0A wd.find_element_by_link_text(%22Logout%22).click()%0A self.assertTrue(success)%0A %0A def tearDown(self):%0A self.wd.quit()%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
4569c22d2d0245641e0c2696f798f273405c6bee
|
Test recorded and exported to the project
|
test_add_group.py
|
test_add_group.py
|
Python
| 0 |
@@ -0,0 +1,1894 @@
+# -*- coding: utf-8 -*-%0Afrom selenium.webdriver.firefox.webdriver import WebDriver%0Afrom selenium.webdriver.common.action_chains import ActionChains%0Aimport time, unittest%0A%0Adef is_alert_present(wd):%0A try:%0A wd.switch_to_alert().text%0A return True%0A except:%0A return False%0A%0Aclass test_add_group(unittest.TestCase):%0A def setUp(self):%0A self.wd = WebDriver()%0A self.wd.implicitly_wait(60)%0A %0A def test_test_add_group(self):%0A success = True%0A wd = self.wd%0A wd.get(%22http://localhost/addressbook/%22)%0A wd.find_element_by_name(%22user%22).click()%0A wd.find_element_by_name(%22user%22).clear()%0A wd.find_element_by_name(%22user%22).send_keys(%22admin%22)%0A wd.find_element_by_name(%22pass%22).click()%0A wd.find_element_by_name(%22pass%22).clear()%0A wd.find_element_by_name(%22pass%22).send_keys(%22secret%22)%0A wd.find_element_by_xpath(%22//form%5B@id='LoginForm'%5D/input%5B3%5D%22).click()%0A wd.find_element_by_link_text(%22groups%22).click()%0A wd.find_element_by_name(%22new%22).click()%0A wd.find_element_by_name(%22group_name%22).click()%0A wd.find_element_by_name(%22group_name%22).clear()%0A wd.find_element_by_name(%22group_name%22).send_keys(%22group_002%22)%0A wd.find_element_by_name(%22group_header%22).click()%0A wd.find_element_by_name(%22group_header%22).clear()%0A wd.find_element_by_name(%22group_header%22).send_keys(%22another_header%22)%0A wd.find_element_by_name(%22group_footer%22).click()%0A wd.find_element_by_name(%22group_footer%22).clear()%0A wd.find_element_by_name(%22group_footer%22).send_keys(%22another_footer%22)%0A wd.find_element_by_name(%22submit%22).click()%0A wd.find_element_by_link_text(%22group page%22).click()%0A wd.find_element_by_link_text(%22Logout%22).click()%0A self.assertTrue(success)%0A %0A def tearDown(self):%0A self.wd.quit()%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
1ce8285228c29370ad4230f7968abdd7436ff250
|
update nth stair
|
IK/DP/nth_stair.py
|
IK/DP/nth_stair.py
|
Python
| 0.000001 |
@@ -0,0 +1,806 @@
+# http://www.geeksforgeeks.org/count-ways-reach-nth-stair/%0A# This problem is simpl extension of Fibonacci Number%0A%0A# Case 1 when person can take 1 or 2 steps%0A%0Adef fibonacci_number(n):%0A if n %3C= 1:%0A return n%0A return fibonacci_number(n-1) + fibonacci_number(n-2)%0A%0Adef count_ways(s):%0A # ways(1) = fib(2) = 1%0A # ways(2) = fib(3) = 2%0A # ways(3) = fib(4) = 3%0A return fibonacci_number(s+1)%0A%0A# generalized version%0A# if person can take m steps (1,2,3.....m-1,m)%0A%0Adef count_ways_util(n,m):%0A if n %3C= 1:%0A return n%0A res = 0%0A i = 1%0A while i %3C= m and i %3C= n:%0A res += count_ways_util(n-i,m)%0A i += 1%0A return res%0A%0Adef count_ways_generalize(s,m):%0A return count_ways_util(s+1,m)%0A%0Aif __name__ == %22__main__%22:%0A print(%22Number of ways: %22,count_ways_generalize(4,2))%0A
|
|
f30e4fd9a617ab664d3851b05b27bc5512af3551
|
Make remote streamer for ipc allow both str and unicode
|
flexget/ipc.py
|
flexget/ipc.py
|
from __future__ import absolute_import, division, unicode_literals
import logging
import random
import string
import threading
import rpyc
from rpyc.utils.server import ThreadedServer
from flexget.logger import console, capture_output
from flexget.options import get_parser, ParserError
log = logging.getLogger('ipc')
# Allow some attributes from dict interface to be called over the wire
rpyc.core.protocol.DEFAULT_CONFIG['safe_attrs'].update(['items'])
rpyc.core.protocol.DEFAULT_CONFIG['allow_pickle'] = True
IPC_VERSION = 3
AUTH_ERROR = 'authentication error'
AUTH_SUCCESS = 'authentication success'
class RemoteStream(object):
"""
Used as a filelike to stream text to remote client. If client disconnects while this is in use, an error will be
logged, but no exception raised.
"""
def __init__(self, writer):
"""
:param writer: A function which writes a line of text to remote client.
"""
self.buffer = ''
self.writer = writer
def write(self, data):
if not self.writer:
return
self.buffer += data
while '\n' in self.buffer:
line, self.buffer = self.buffer.split('\n', 1)
try:
self.writer(line)
except EOFError:
self.writer = None
log.error('Client ended connection while still streaming output.')
class DaemonService(rpyc.Service):
# This will be populated when the server is started
manager = None
def exposed_version(self):
return IPC_VERSION
def exposed_handle_cli(self, args):
args = rpyc.utils.classic.obtain(args)
parser = get_parser()
try:
options = parser.parse_args(args, raise_errors=True)
except ParserError as e:
# Recreate the normal error text to the client's console
self.client_console('error: ' + e.message)
e.parser.print_help(self.client_out_stream)
return
if not options.cron:
with capture_output(self.client_out_stream, loglevel=options.loglevel):
self.manager.handle_cli(options)
else:
self.manager.handle_cli(options)
def client_console(self, text):
self._conn.root.console(text)
@property
def client_out_stream(self):
return RemoteStream(self._conn.root.console)
class ClientService(rpyc.Service):
def on_connect(self):
"""Make sure the client version matches our own."""
daemon_version = self._conn.root.version()
if IPC_VERSION != daemon_version:
self._conn.close()
raise ValueError('Daemon is different version than client.')
def exposed_version(self):
return IPC_VERSION
def exposed_console(self, text):
console(text)
class IPCServer(threading.Thread):
def __init__(self, manager, port=None):
super(IPCServer, self).__init__(name='ipc_server')
self.daemon = True
self.manager = manager
self.host = '127.0.0.1'
self.port = port or 0
self.password = ''.join(random.choice(string.letters + string.digits) for x in range(15))
self.server = None
def authenticator(self, sock):
channel = rpyc.Channel(rpyc.SocketStream(sock))
password = channel.recv()
if password != self.password:
channel.send(AUTH_ERROR)
raise rpyc.utils.authenticators.AuthenticationError('Invalid password from client.')
channel.send(AUTH_SUCCESS)
return sock, self.password
def run(self):
DaemonService.manager = self.manager
self.server = ThreadedServer(
DaemonService, hostname=self.host, port=self.port, authenticator=self.authenticator, logger=log
)
# If we just chose an open port, write save the chosen one
self.port = self.server.listener.getsockname()[1]
self.manager.write_lock(ipc_info={'port': self.port, 'password': self.password})
self.server.start()
def shutdown(self):
self.server.close()
class IPCClient(object):
def __init__(self, port, password):
channel = rpyc.Channel(rpyc.SocketStream.connect('127.0.0.1', port))
channel.send(password)
response = channel.recv()
if response == AUTH_ERROR:
# TODO: What to raise here. I guess we create a custom error
raise ValueError('Invalid password for daemon')
self.conn = rpyc.utils.factory.connect_channel(channel, service=ClientService)
def close(self):
self.conn.close()
def __getattr__(self, item):
"""Proxy all other calls to the exposed daemon service."""
return getattr(self.conn.root, item)
|
Python
| 0.000001 |
@@ -961,18 +961,20 @@
uffer =
-''
+None
%0A
@@ -1082,46 +1082,289 @@
-self.buffer += data%0A while '%5Cn'
+# This relies on all data up to a newline being either unicode or str, not mixed%0A if not self.buffer.strip():%0A self.buffer = data%0A else:%0A self.buffer += data%0A newline = b'%5Cn' if isinstance(self.buffer, str) else '%5Cn'%0A while newline
in
@@ -1426,20 +1426,23 @@
r.split(
-'%5Cn'
+newline
, 1)%0A
|
d5a42bd23e7227e041aa3d748765b056e3294a0d
|
Create infogan.py
|
InfoGAN/infogan.py
|
InfoGAN/infogan.py
|
Python
| 0 |
@@ -0,0 +1,22 @@
+# initial python file%0A
|
|
af5c39347863f2804bb1e36cb0bf6f1a049530c2
|
add 15-26
|
src/training/Core2/Chapter15RegularExpressions/exercise15_26.py
|
src/training/Core2/Chapter15RegularExpressions/exercise15_26.py
|
Python
| 0.999988 |
@@ -0,0 +1,317 @@
+import re%0D%0A%0D%0A%0D%0Adef replace_email(a_string, new_email):%0D%0A return re.sub('%5Cw+@%5Cw+%5C.%5Cw+', new_email, a_string)%0D%0A%0D%0Aif __name__ == '__main__':%0D%0A assert '[email protected] xx [email protected] b' == replace_email('[email protected] xx [email protected] b', '[email protected]')%0D%0A assert 'abb' == replace_email('abb', '[email protected]')%0D%0A print 'all passed.'%0D%0A
|
|
f730a8cfd6700eeedf1cbcc5df8b3b97f918f0fa
|
Add filterset for tag page, refs #450
|
grouprise/features/tags/filters.py
|
grouprise/features/tags/filters.py
|
Python
| 0 |
@@ -0,0 +1,689 @@
+from django.forms.widgets import CheckboxInput%0Afrom django_filters import BooleanFilter%0Afrom django_filters.widgets import BooleanWidget%0A%0Afrom grouprise.features.associations.filters import ContentFilterSet%0A%0A%0Aclass TagContentFilterSet(ContentFilterSet):%0A tagged_only = BooleanFilter(%0A label='nur verschlagwortete Beitr%C3%A4ge', widget=CheckboxInput,%0A method='filter_tagged_only')%0A%0A def __init__(self, *args, tag=None, **kwargs):%0A self.tag = tag%0A super().__init__(*args, **kwargs)%0A%0A def filter_tagged_only(self, queryset, name, value):%0A if value:%0A queryset = queryset.filter(content__taggeds__tag=self.tag)%0A return queryset%0A
|
|
42c7db3f9422d38b0d7273ad8f95db8183b69a9c
|
Add a python version of the lineset_test ... it demonstrates how one has to run eve from python.
|
tutorials/eve/lineset_test.py
|
tutorials/eve/lineset_test.py
|
Python
| 0.000423 |
@@ -0,0 +1,752 @@
+## Translated from 'lineset_test.C'.%0A## Run as: python -i lineset_test.py%0A%0Aimport ROOT%0AROOT.PyConfig.GUIThreadScheduleOnce += %5B ROOT.TEveManager.Create %5D%0A%0Adef lineset_test(nlines = 40, nmarkers = 4):%0A r = ROOT.TRandom(0)%0A s = 100%0A%0A ls = ROOT.TEveStraightLineSet()%0A%0A for i in range(nlines):%0A ls.AddLine( r.Uniform(-s,s), r.Uniform(-s,s), r.Uniform(-s,s) ,%0A r.Uniform(-s,s), r.Uniform(-s,s), r.Uniform(-s,s))%0A nm = int(nmarkers*r.Rndm())%0A for m in range(nm):%0A ls.AddMarker( i, r.Rndm() )%0A ls.SetMarkerSize(1.5)%0A ls.SetMarkerStyle(4)%0A%0A ROOT.gEve.AddElement(ls)%0A ROOT.gEve.Redraw3D()%0A return ls%0A%0Aif __name__=='__main__':%0A ROOT.PyGUIThread.finishSchedule()%0A lineset_test()%0A
|