| commit (stringlengths 40–40) | subject (stringlengths 1–3.25k) | old_file (stringlengths 4–311) | new_file (stringlengths 4–311) | old_contents (stringlengths 0–26.3k) | lang (stringclasses 3 values) | proba (float64 0–1) | diff (stringlengths 0–7.82k) |
|---|---|---|---|---|---|---|---|
d03254dabaac466edd697de38c3433475828bd4f
|
Add tests for has_changes
|
tests/functions/test_has_changes.py
|
tests/functions/test_has_changes.py
|
Python
| 0.000001 |
@@ -0,0 +1,698 @@
+import sqlalchemy as sa
+from sqlalchemy.ext.declarative import declarative_base
+
+from sqlalchemy_utils import has_changes
+
+
+class TestHasChanges(object):
+    def setup_method(self, method):
+        Base = declarative_base()
+
+        class Article(Base):
+            __tablename__ = 'article_translation'
+            id = sa.Column(sa.Integer, primary_key=True)
+            title = sa.Column(sa.String(100))
+
+        self.Article = Article
+
+    def test_without_changed_attr(self):
+        article = self.Article()
+        assert not has_changes(article, 'title')
+
+    def test_with_changed_attr(self):
+        article = self.Article(title='Some title')
+        assert has_changes(article, 'title')
|
|
795cd6e190a1cc4d416c5524399780e586dc6c45
|
Add better kitt script
|
kitt/kitt.py
|
kitt/kitt.py
|
Python
| 0 |
@@ -0,0 +1,891 @@
+from microbit import *
+
+display.scroll("I am the Knight Industries 2000")
+
+MAX_ROWS = 4
+MAX_BRIGHTNESS = 9
+MIN_BRIGHTNESS = 2
+
+def scan(reverse=False):
+
+    for i in range(0, 9):
+        brightness = MAX_BRIGHTNESS
+
+        row_range = range(0, i+1) if not reverse else range(i, -1, -1)
+
+        counter = 0
+        for j in row_range:
+            x = i - j if not reverse else MAX_ROWS - j
+            light_level = max(MIN_BRIGHTNESS, brightness) if counter >= 2 else MAX_BRIGHTNESS - counter
+            print (x, light_level)
+            if x <= MAX_ROWS and x >= 0:
+                display.set_pixel(x, 2, light_level)
+            counter += 1
+
+            #if i >= 2:
+            brightness -= 1
+
+        print("-")
+        if i < 8:
+            sleep(100)
+
+for x in range(0, MAX_ROWS+1):
+    display.set_pixel(x, 2, MIN_BRIGHTNESS)
+
+while True:
+
+    scan()
+    scan(reverse=True)
|
|
a6ac5055a1867259ab17997a076299731e57c45b
|
Add Android extractor
|
strings2pot/extractors/android.py
|
strings2pot/extractors/android.py
|
Python
| 0.000001 |
@@ -0,0 +1,1513 @@
+# -*- coding: utf-8 -*-
+
+import re
+import xml.etree.ElementTree as ET
+
+class AndroidExtractor:
+    def __init__(self, source_file, destination_file, context_id_generator):
+        self.source_file = source_file
+        self.destination_file = destination_file
+        self._create_context_id = context_id_generator
+
+    def parse_string(self, string):
+        s = string.replace("\\'", "'")
+        s = s.replace("\"", "\\\"")
+        s = s.replace("\\n", "\n")
+        s = re.sub(r'%\d\$s', '%s', s)
+        s = re.sub(r'%\d\$d', '%d', s)
+
+        if "\n" in s:
+            s = s.replace("\n", "\\n\n")
+            parts = s.split("\n")
+            new_parts = ["\"\""]
+            for line in parts:
+                new_parts.append("\"%s\"" % line)
+
+            s = "\n".join(new_parts)
+        else:
+            s = "\"%s\"" % s
+        return s
+
+    def run(self):
+        with open(self.destination_file, 'a') as pot:
+            root = ET.parse(self.source_file)
+            counter = 3
+
+            for el in root.findall('./string'):
+                parsed_string = self.parse_string(el.text)
+                message_id = parsed_string[1:len(parsed_string)-1]
+
+                counter += 1
+                content = "\n#: %s:%d\nmsgctxt \"%s\"\nmsgid %s\nmsgstr \"\"\n" % (
+                    self.source_file,
+                    counter,
+                    self._create_context_id(message_id), # was el.attrib.get('name')
+                    parsed_string )
+                pot.write(content)
|
|
c711f62ef96d67a6e42e3bbe10c0b3cd64a23444
|
add moviepy - text_hineinzoomen
|
moviepy/text_hineinzoomen.py
|
moviepy/text_hineinzoomen.py
|
Python
| 0.000001 |
@@ -0,0 +1,1661 @@
+#!/usr/bin/env python
+
+# Create a video with text and zoom in (the text grows larger)
+
+# Settings
+text = 'Text'            # text
+textgroesse = 150        # text size in pixels
+textfarbe_r = 0          # text color R
+textfarbe_g = 0          # text color G
+textfarbe_b = 0          # text color B
+schrift = 'FreeSans'     # font
+winkel = 0               # angle
+hgfarbe_r = 1            # background color R
+hgfarbe_g = 1            # background color G
+hgfarbe_b = 1            # background color B
+videobreite = 1280       # in pixels
+videohoehe = 720         # in pixels
+videolaenge = 5          # in seconds
+videodatei = 'text.ogv'  # video file
+frames = 25              # frames per second
+
+# import the moviepy module
+from moviepy.editor import *
+# import the gizeh module
+import gizeh
+
+# function that generates frames; t is the time at the given frame
+def create_frame(t):
+    img = gizeh.Surface(videobreite,videohoehe,bg_color=(hgfarbe_r,hgfarbe_g,hgfarbe_b))
+    text_img = gizeh.text(text, fontfamily=schrift, fontsize=t*(textgroesse/videolaenge),
+                          fill=(textfarbe_r,textfarbe_g,textfarbe_b),
+                          xy=(videobreite/2,videohoehe/2), angle=winkel)
+    text_img.draw(img)
+    return img.get_npimage()
+
+# create the video
+video = VideoClip(create_frame, duration=videolaenge)
+
+# write the video
+video.write_videofile(videodatei, fps=frames)
+
+
+# Help for moviepy: https://zulko.github.io/moviepy/index.html
+# Help for gizeh: https://github.com/Zulko/gizeh
+
+# text_hineinzoomen.py
+# License: http://creativecommons.org/publicdomain/zero/1.0/
+# Author: openscreencast.de
|
|
c61b1595709b6acd26cf7c43e7858e3ad5cb588f
|
Add missing module.
|
csvkit/headers.py
|
csvkit/headers.py
|
Python
| 0 |
@@ -0,0 +1,196 @@
+#!/usr/bin/env python
+
+def make_default_headers(n):
+    """
+    Make a set of simple, default headers for files that are missing them.
+    """
+    return [u'column%i' % (i + 1) for i in range(n)]
|
|
9b50da16238d2f816199c8fb8a20ec558edf5d46
|
Create oie_compress.py
|
oie_compress.py
|
oie_compress.py
|
Python
| 0.000002 |
@@ -0,0 +1,1013 @@
+# 1.0	much	be paid on	insurance claim
+# 1.0	much	is	paid
+# 1.0	much	is	paid on insurance claim
+# 1.0	much	be	paid
+# -----------------------------------------------------
+# 1.0	channel	joining	two bodies
+# 1.0	channel	joining	two larger bodies of water
+# 1.0	channel	joining	two larger bodies
+# 1.0	channel	joining	two bodies of water
+# 1.0	narrow channel	joining	two bodies of water
+# 1.0	narrow channel	joining	two larger bodies
+# 1.0	narrow channel	joining	two larger bodies of water
+# 1.0	narrow channel	joining	two bodies
+
+import argparse
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--oie", help="Input file containing openIE triplets to be compresses.", required=True)
+parser.add_argument("--o", help="Output file for compressed openIE triplets.")
+args = parser.parse_args()
+
+with open(args.oie) as f:
+    triplets=map(str.strip().split("\t")[1:], f.readlines())
+
+if len(triplets) < 3:
+    print "No triplets in file %s" % args.oie
+    exit()
+
+for c in xrange(3):
+    [row[c] for row in triplets]
|
|
302f98844487d894252d3dc3f4d30940fbcbd9e1
|
Allow pex to be invoked using runpy (python -m pex). (#637)
|
pex/__main__.py
|
pex/__main__.py
|
Python
| 0.000065 |
@@ -0,0 +1,235 @@
+# Copyright 2019 Pants project contributors (see CONTRIBUTORS.md).
+# Licensed under the Apache License, Version 2.0 (see LICENSE).
+
+from __future__ import absolute_import
+
+from pex.bin import pex
+
+__name__ == '__main__' and pex.main()
|
|
8f506c20ccad47ee6f2454a419145b1b2b48adba
|
Create bold-words-in-string.py
|
Python/bold-words-in-string.py
|
Python/bold-words-in-string.py
|
Python
| 0.999987 |
@@ -0,0 +1,1146 @@
+# Time:  O(n * l), n is the length of S, l is the average length of words
+# Space: O(t), t is the size of trie
+
+class Solution(object):
+    def boldWords(self, words, S):
+        """
+        :type words: List[str]
+        :type S: str
+        :rtype: str
+        """
+        _trie = lambda: collections.defaultdict(_trie)
+        trie = _trie()
+        for i, word in enumerate(words):
+            reduce(dict.__getitem__, word, trie)["_end"] = i
+
+        lookup = [False] * len(S)
+        for i in xrange(len(S)):
+            curr = trie
+            k = -1
+            for j in xrange(i, len(S)):
+                if S[j] not in curr:
+                    break
+                curr = curr[S[j]]
+                if "_end" in curr:
+                    k = j
+            for j in xrange(i, k+1):
+                lookup[j] = True
+
+        result = []
+        for i in xrange(len(S)):
+            if lookup[i] and (i == 0 or not lookup[i-1]):
+                result.append("<b>")
+            result.append(S[i])
+            if lookup[i] and (i == len(S)-1 or not lookup[i+1]):
+                result.append("</b>");
+        return "".join(result)
|
|
228f4325aa5f1c8b616f45462280b4a7cb0792dd
|
Add test for empty files to csvjoin
|
tests/test_utilities/test_csvjoin.py
|
tests/test_utilities/test_csvjoin.py
|
#!/usr/bin/env python

import sys

try:
    from mock import patch
except ImportError:
    from unittest.mock import patch

from csvkit.utilities.csvjoin import CSVJoin, launch_new_instance
from tests.utils import CSVKitTestCase


class TestCSVJoin(CSVKitTestCase):
    Utility = CSVJoin

    def test_launch_new_instance(self):
        with patch.object(sys, 'argv', [self.Utility.__name__.lower(), 'examples/join_a.csv', 'examples/join_b.csv']):
            launch_new_instance()

    def test_sequential(self):
        output = self.get_output_as_io(['examples/join_a.csv', 'examples/join_b.csv'])
        self.assertEqual(len(output.readlines()), 4)

    def test_inner(self):
        output = self.get_output_as_io(['-c', 'a', 'examples/join_a.csv', 'examples/join_b.csv'])
        self.assertEqual(len(output.readlines()), 3)

    def test_left(self):
        output = self.get_output_as_io(['-c', 'a', '--left', 'examples/join_a.csv', 'examples/join_b.csv'])
        self.assertEqual(len(output.readlines()), 5)

    def test_right(self):
        output = self.get_output_as_io(['-c', 'a', '--right', 'examples/join_a.csv', 'examples/join_b.csv'])
        self.assertEqual(len(output.readlines()), 4)

    def test_outer(self):
        output = self.get_output_as_io(['-c', 'a', '--outer', 'examples/join_a.csv', 'examples/join_b.csv'])
        self.assertEqual(len(output.readlines()), 6)

    def test_left_short_columns(self):
        output = self.get_output_as_io(['-c', 'a', 'examples/join_a_short.csv', 'examples/join_b.csv'])
        with open('examples/join_short.csv') as f:
            self.assertEqual(output.readlines(), f.readlines())
|
Python
| 0 |
@@ -221,16 +221,32 @@
 TestCase
+, EmptyFileTests
 \n\n\nclass
@@ -272,16 +272,32 @@
 TestCase
+, EmptyFileTests
 ):\n    U
@@ -312,16 +312,63 @@
 CSVJoin
+\n    default_args = ['examples/dummy.csv', '-']
 \n\n    de
|
dbb127a6fbadfa17f5faad45e8d7ebb6b943a77d
|
add basic test for vamp_spectral_centroid
|
tests/test_vamp_spectral_centroid.py
|
tests/test_vamp_spectral_centroid.py
|
Python
| 0.000001 |
@@ -0,0 +1,1047 @@
+#! /usr/bin/env python
+
+import unittest
+from unit_timeside import TestRunner
+from timeside.plugins.decoder.aubio import AubioDecoder as FileDecoder
+from timeside.core import get_processor
+from timeside.core.tools.test_samples import samples
+
+
+class TestVampSpectralCentroid(unittest.TestCase):
+
+    proc_id = 'vamp_spectral_centroid'
+
+    def setUp(self):
+        self.analyzer = get_processor(self.proc_id)()
+
+    def testOnC4Scale(self):
+        "runs on C4 scale"
+        self.source = samples["C4_scale.wav"]
+
+    def testOnSweep(self):
+        "runs on sweep"
+        self.source = samples["sweep.wav"]
+
+    def tearDown(self):
+        decoder = FileDecoder(self.source)
+        (decoder | self.analyzer).run()
+        results = self.analyzer.results
+
+        result = results.get_result_by_id(self.proc_id)
+        duration = result.audio_metadata.duration
+        data_duration = result.data_object.time[-1]
+
+        self.assertAlmostEqual (duration, data_duration, 1)
+
+if __name__ == '__main__':
+    unittest.main(testRunner=TestRunner())
|
|
86c67f321ec4ee7c254fde4a7f942a83d5e35016
|
Update TFRT dependency to use revision http://github.com/tensorflow/runtime/commit/00734af980b920f9f963badf85fbeb12d576fde5.
|
third_party/tf_runtime/workspace.bzl
|
third_party/tf_runtime/workspace.bzl
|
"""Provides the repository macro to import TFRT."""
load("//third_party:repo.bzl", "tf_http_archive", "tf_mirror_urls")
def repo():
"""Imports TFRT."""
# Attention: tools parse and update these lines.
TFRT_COMMIT = "d1caeb8bdba1851194baf06c28ea09b5b67e5623"
TFRT_SHA256 = "e480ad7451b9e3ce45da61d7107953a4d55789bf6087442fd000a1ecb7c6604e"
tf_http_archive(
name = "tf_runtime",
sha256 = TFRT_SHA256,
strip_prefix = "runtime-{commit}".format(commit = TFRT_COMMIT),
urls = tf_mirror_urls("https://github.com/tensorflow/runtime/archive/{commit}.tar.gz".format(commit = TFRT_COMMIT)),
# A patch file can be provided for atomic commits to both TF and TFRT.
# The job that bumps the TFRT_COMMIT also resets patch_file to 'None'.
patch_file = None,
)
|
Python
| 0 |
@@ -228,133 +228,133 @@
  = "
-d1caeb8bdba1851194baf06c28ea09b5b67e5623"\n    TFRT_SHA256 = "e480ad7451b9e3ce45da61d7107953a4d55789bf6087442fd000a1ecb7c6604e
+00734af980b920f9f963badf85fbeb12d576fde5"\n    TFRT_SHA256 = "0c136cdfb87ae3663c162ad807c57983a8119fa7097fb589c4a7d04b98d09d3d
 "\n\n
|
8cf5b328d7596a9b74490b7dfd4a1b8aa1577b55
|
Merge remote-tracking branch 'origin' into AC-9512
|
accelerator/migrations/0110_remove_bucket_list_program_role_20220707_1001.py
|
accelerator/migrations/0110_remove_bucket_list_program_role_20220707_1001.py
|
Python
| 0 |
@@ -0,0 +1,1079 @@
+from django.db import migrations
+
+
+def remove_bucket_list_program_roles(apps, schema_editor):
+    BucketState = apps.get_model('accelerator', 'BucketState')
+    ProgramRole = apps.get_model('accelerator', 'ProgramRole')
+    ProgramRoleGrant = apps.get_model('accelerator', 'ProgramRoleGrant')
+    NodePublishedFor = apps.get_model('accelerator', 'NodePublishedFor')
+
+    program_role_ids = BucketState.objects.values_list('program_role_id',
+                                                       flat=True)
+    NodePublishedFor.objects.filter(
+        published_for_id__in=program_role_ids).delete()
+    ProgramRoleGrant.objects.filter(
+        program_role_id__in=program_role_ids).delete()
+    BucketState.objects.all().delete()
+    ProgramRole.objects.filter(pk__in=program_role_ids).delete()
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ('accelerator', '0109_remove_interest_fields_20220705_0425'),
+    ]
+
+    operations = [
+        migrations.RunPython(remove_bucket_list_program_roles,
+                             migrations.RunPython.noop)
+    ]
|
|
5945fe5c527b3f5cb2ed104eccdf9266dc702eb1
|
add second order constraint
|
toppra/constraint/can_linear_second_order.py
|
toppra/constraint/can_linear_second_order.py
|
Python
| 0.009653 |
@@ -0,0 +1,2433 @@
+from .canonical_linear import CanonicalLinearConstraint
+from .constraint import DiscretizationType
+import numpy as np
+
+
+class CanonicalLinearSecondOrderConstraint(CanonicalLinearConstraint):
+    """ A class to represent Canonical Linear Generalized Second-order constraints.
+
+    Parameters
+    ----------
+    inv_dyn: (array, array, array) -> array
+        The "inverse dynamics" function that receives joint position, velocity and
+        acceleration as inputs and ouputs the "joint torque". See notes for more
+        details.
+    cnst_coeffs: (array) -> array, array
+        The coefficient functions of the constraints. See notes for more details.
+
+    Notes
+    -----
+    A constraint of this kind can be represented by the following formula
+
+    .. math::
+        A(q) \ddot q + \dot q^\\top B(q) \dot q + C(q) = w,
+
+    where w is a vector that satisfies the polyhedral constraint
+
+    .. math::
+        F(q) w \\leq g(q).
+
+    To evaluate the constraint parameters, multiple calls to inv_dyn, cnst_F and cnst_g
+    are made. Specifically one can write the second-order equation as follows
+
+    .. math::
+        A(q) p'(s) \ddot s + [A(q) p''(s) + p'(s)^\\top B(q) p'(s)] + C(q) = w,
+
+    To evaluate the coefficients a(s), b(s), c(s), inv_dyn is called repeatedly with
+    appropriate arguments.
+
+    """
+
+    def __init__(self, inv_dyn, cnst_F, cnst_g, discretization_scheme=DiscretizationType.Collocation):
+        super(CanonicalLinearSecondOrderConstraint, self).__init__()
+        self.discretization_type = discretization_scheme
+        self.inv_dyn = inv_dyn
+        self.cnst_F = cnst_F
+        self.cnst_g = cnst_g
+        self._format_string = "    Generalized Second-order constraint"
+        self.discretization_type = discretization_scheme
+
+    def compute_constraint_params(self, path, gridpoints):
+        v_zero = np.zeros(path.get_dof())
+        p = path.eval(gridpoints)
+        ps = path.evald(gridpoints)
+        pss = path.evaldd(gridpoints)
+
+        F = np.array(map(self.cnst_F, p))
+        g = np.array(map(self.cnst_g, p))
+        c = np.array(
+            map(lambda p_: self.inv_dyn(p_, v_zero, v_zero), p)
+        )
+        a = np.array(
+            map(lambda p_, ps_: self.inv_dyn(p_, v_zero, ps_), zip(p, ps))
+        ) - c
+        b = np.array(
+            map(lambda p_, ps_, pss_: self.inv_dyn(p_, ps_, pss_), zip(p, ps, pss))
+        ) - c
+
+        return a, b, c, F, g, None, None
|
|
3020472569a49f01331ebb150f004e2684196b8e
|
add expression to improve the domain
|
bin/tools/expression.py
|
bin/tools/expression.py
|
Python
| 0.000012 |
@@ -0,0 +1,2114 @@
+#!/usr/bin/env python
+
+def _is_operator( element ):
+    return isinstance( element, str ) and element in ['&','|']
+
+def _is_leaf( element ):
+    return isinstance( element, tuple ) and len( element ) == 3 and element[1] in ['=', '<>', '!=', '<=', '<', '>', '>=', 'like', 'not like', 'ilike', 'not ilike']
+
+def _is_expression( element ):
+    return isinstance( element, tuple ) and len( element ) > 2 and _is_operator( element[0] )
+
+class expression_leaf( object ):
+    def __init__(self, operator, left, right ):
+        self.operator = operator
+        self.left = left
+        self.right = right
+
+    def parse( self ):
+        return self
+
+    def to_sql( self ):
+        return "%s %s %s" % ( self.left, self.operator, self.right )
+
+class expression( object ):
+    def __init__( self, exp ):
+        if isinstance( exp, tuple ):
+            if not _is_leaf( exp ) and not _is_operator( exp[0] ):
+                exp = list( exp )
+        if isinstance( exp, list ):
+            if len( exp ) == 1 and _is_leaf( exp[0] ):
+                exp = exp[0]
+            else:
+                if not _is_operator( exp[0][0] ):
+                    exp.insert( 0, '&' )
+                    exp = tuple( exp )
+                else:
+                    exp = exp[0]
+
+        self.exp = exp
+        self.operator = '&'
+        self.children = []
+
+    def parse( self ):
+        if _is_leaf( self.exp ):
+            self.children.append( expression_leaf( self.exp[1], self.exp[0], self.exp[2] ).parse() )
+        elif _is_expression( self.exp ):
+            self.operator = self.exp[0]
+
+            for element in self.exp[1:]:
+                if not _is_operator( element ) and not _is_leaf(element):
+                    self.children.append( expression(element).parse() )
+                else:
+                    if _is_leaf(element):
+                        self.children.append( expression_leaf( element[1], element[0], element[2] ).parse() )
+        return self
+
+    def to_sql( self ):
+        return "( %s )" % ((" %s " % {'&' : 'AND', '|' : 'OR' }[self.operator]).join([child.to_sql() for child in self.children]))
|
|
0226bec54c30a31c0005e7318b69c58a379cfbc9
|
refactor output function
|
mystarspilot/view.py
|
mystarspilot/view.py
|
Python
| 0.998671 |
@@ -0,0 +1,1565 @@
+from __future__ import print_function
+from __future__ import absolute_import
+from __future__ import unicode_literals
+from colorama import Fore, Back, Style
+
+class SearchResultView(object):
+
+    def print_search_result(self, search_result, keywords=None):
+
+        if search_result is not None:
+            for repo in search_result:
+                self.print_repo_name(repo)
+                self.print_repo_url(repo)
+                self.print_repo_language(repo)
+                self.print_repo_description(repo)
+
+        self.print_summary(search_result)
+
+    def print_summary(self, search_result):
+        self._print('', end='\n')
+        count = len(search_result)
+        fore_color = Fore.GREEN if count else Fore.YELLOW
+        text = "({} star{} found)".format(count if count else "No", 's' if count > 1 else '')
+        self._print(text, fore_color, end='\n')
+
+    def print_repo_name(self, repo):
+        self._print(repo.full_name, Fore.GREEN)
+
+    def print_repo_url(self, repo):
+        self._print("[{}]".format(repo.html_url), Fore.YELLOW)
+
+    def print_repo_language(self, repo):
+        if repo.language:
+            self._print(repo.language, Fore.BLUE, end='\n')
+
+    def print_repo_description(self, repo):
+        if repo.description:
+            self._print(repo.description, end='\n')
+
+    def _print(self, text='', fore_color=Fore.RESET, end=' '):
+        print(fore_color + text, end='')
+        print(Fore.RESET + Back.RESET + Style.RESET_ALL, end=end)
|
|
245e661c50df41942ca3f0c8ee794532e3c02c4c
|
Translate PowerShell sample NCM.ExecuteScript.ps1 to python
|
samples/ncm_execute_script.py
|
samples/ncm_execute_script.py
|
Python
| 0.999999 |
@@ -0,0 +1,1051 @@
+from __future__ import print_function
+import re
+import requests
+from orionsdk import SwisClient
+from time import sleep
+
+def main():
+	npm_server = 'localhost'
+	username = 'admin'
+	password = ''
+
+	swis = SwisClient(npm_server, username, password)
+
+	ip = '10.199.252.6'
+	data = swis.query('SELECT NodeID FROM Cirrus.Nodes WHERE AgentIP = @ip', ip=ip)['results']
+	nodeId = data[0]['NodeID']
+	script = 'show clock'
+
+	swis.invoke('Cirrus.ConfigArchive', 'Execute', [nodeId], script, username)
+
+	transferId = '{{{0}}}:{1}:ExecuteScript'.format(nodeId, username)
+
+	status = 'Queued'
+	while status != 'Complete' and status != 'Error':
+		sleep(1)
+		data = swis.query('SELECT T.Status, T.Error FROM Cirrus.TransferQueue T WHERE T.TransferID=@transfer', transfer=transferId)['results']
+		status = data[0]['Status']
+
+	data = swis.query('SELECT T.Log FROM Cirrus.TransferQueue T WHERE T.TransferID=@transfer', transfer=transferId)['results']
+	output = data[0]['Log']
+	print(output)
+
+requests.packages.urllib3.disable_warnings()
+
+if __name__ == '__main__':
+	main()
|
|
7e91549abc8d185deb231c937d7740606f9454ec
|
add pmi element unit test
|
test_pmi_element.py
|
test_pmi_element.py
|
Python
| 0 |
@@ -0,0 +1,740 @@
+#!/usr/bin/env python
+# -*- coding: UTF-8 -*-
+#
+# The unit test case for pmi.TopkHeap and PMIElement
+#
+# @author: Jason Wu ([email protected])
+
+from pmi import PMIElement
+import sys
+import unittest
+
+class PMIElementTestCase(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def tearGt(self):
+        f = PMIElement('f', 12)
+        e = PMIElement('e', 11)
+        self.assertEqual(True, e < f)
+        self.assertEqual(True, f > e)
+
+    def testEq(self):
+        f = PMIElement('f', 11)
+        e = PMIElement('e', 11)
+        g = PMIElement('e', 11)
+        self.assertEqual(False, e == f)
+        self.assertEqual(True, e == g)
+
+
+    def testPrintSomething(self):
+        pass
+
+if __name__ == "__main__":
+    unittest.main()
|
|
3c52683e759f146ad247c6e397d5d49dd1cc9966
|
Create __init__.py
|
testing/__init__.py
|
testing/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1 @@
+
|
|
a770c91ea6761d890387b4b6e130cb495817eea0
|
Improve the sc2parse debugging script.
|
sc2reader/scripts/sc2parse.py
|
sc2reader/scripts/sc2parse.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import sc2reader

def main():
	for replay in sc2reader.load_replays(sys.argv[1:], verbose=True):
		pass

if __name__ == '__main__':
	main()
|
Python
| 0 |
@@ -67,16 +67,33 @@
 c2reader
+\nimport traceback
 \n\ndef ma
@@ -107,17 +107,105 @@
 for
+argument in sys.argv[1:]:\n\t\tfor path in sc2reader.utils.get_files(argument):\n\t\t\ttry:\n\t\t\t\t
 replay
-in
+=
 sc2
@@ -226,45 +226,688 @@
 play
-s(sys.argv[1:], verbose=True):\n\t\tpass
+(path, debug=True)\n\t\t\texcept sc2reader.exceptions.ReadError as e:\n\t\t\t\tprint e.replay.filename\n\t\t\t\tprint '{build} - {real_type} on {map_name} - Played {start_time}'.format(**e.replay.__dict__)\n\t\t\t\tprint '[ERROR]', e.message\n\t\t\t\tfor event in e.game_events[-5:]:\n\t\t\t\t\tprint '{0} - {1}'.format(hex(event.type),event.bytes.encode('hex'))\n\t\t\t\te.buffer.seek(e.location)\n\t\t\t\tprint e.buffer.peek(50).encode('hex')\n\t\t\t\tprint\n\t\t\texcept Exception as e:\n\t\t\t\tprint path\n\t\t\t\treplay = sc2reader.load_replay(path, debug=True, load_level=1)\n\t\t\t\tprint '{build} - {real_type} on {map_name} - Played {start_time}'.format(**replay.__dict__)\n\t\t\t\tprint '[ERROR]', e\n\t\t\t\ttraceback.print_exc()\n\t\t\t\tprint\n\n\n
 \n\nif
|
98295608a2ba4519d12212532380253bba4372ed
|
Add script that recommends scrape task schedule based on recent run timings
|
scripts/frequency_analysis.py
|
scripts/frequency_analysis.py
|
Python
| 0 |
@@ -0,0 +1,2297 @@
+import asyncio
+import attr
+import pprint
+import dateutil.parser
+from datetime import timedelta
+from bobsled.core import bobsled
+from bobsled.base import Status
+
+
+def recommend_frequency_for_task(runs):
+    total_duration = timedelta(seconds=0)
+    longest_duration = timedelta(seconds=0)
+    for run in runs:
+        start = dateutil.parser.parse(run.start)
+        end = dateutil.parser.parse(run.end)
+        duration = end - start
+        total_duration += duration
+        if duration > longest_duration:
+            longest_duration = duration
+    average = total_duration / len(runs)
+    if longest_duration.seconds <= 60*10:
+        return '0 */2 * * ?'
+    elif longest_duration.seconds <= 60*60:
+        return '0 */6 * * ?'
+    else:
+        return 'daily'
+
+
+async def analyze_frequency():
+    await bobsled.initialize()
+    tasks = [attr.asdict(t) for t in await bobsled.storage.get_tasks()]
+    results = await asyncio.gather(
+        *[bobsled.run.get_runs(task_name=t["name"], latest=4) for t in tasks]
+    )
+    recommendations = []
+    for task, latest_runs in zip(tasks, results):
+        # make recommendations for scrape tasks that have runs
+        if latest_runs and '-scrape' in task['name']:
+            if all(run.status is Status.Success for run in latest_runs):
+                recommendation = recommend_frequency_for_task(latest_runs)
+            else:
+                # a recent run failed, made a note of that
+                recommendation = 'n/a - at least one recent task failed'
+            recommendations.append({
+                'task': task['name'],
+                'current_schedule': task['triggers'][0]['cron'],
+                'recommended': recommendation
+            })
+
+    changed_recommendations = []
+    for recommendation in recommendations:
+        if recommendation['recommended'] != 'daily' and 'n/a' not in recommendation['recommended']\
+                and recommendation['current_schedule'] != recommendation['recommended']:
+            changed_recommendations.append(recommendation)
+
+    pp = pprint.PrettyPrinter(indent=2)
+    pp.pprint(changed_recommendations)
+
+
+def main():
+    # asyncio.run(bobsled.initialize())  # this makes a threading problem if it's here
+    asyncio.run(analyze_frequency())
+
+
+if __name__ == "__main__":
+    main()
|
|
dd22ea800dbbeffaace1927804d50da60ee77a30
|
Fix TestPages.test_homepage_with_anonymous_giver.
|
tests/test_pages.py
|
tests/test_pages.py
|
from __future__ import print_function, unicode_literals

import datetime
from mock import patch
import pytz

from gittip.elsewhere.twitter import TwitterAccount
from gittip.testing import GITHUB_USER_UNREGISTERED_LGTEST, Harness
from gittip.testing.client import TestClient
from gittip.utils import update_homepage_queries_once


class TestPages(Harness):

    def setUp(self):
        super(Harness, self).setUp()
        self.client = TestClient()

    def get(self, url, returning='body'):
        request = self.client.get(url)
        return getattr(request, returning)

    def test_homepage(self):
        actual = self.client.get('/').body
        expected = "Sustainable Crowdfunding"
        assert expected in actual

    def test_homepage_with_anonymous_giver(self):
        TwitterAccount("bob", {}).opt_in("bob")
        alice = self.make_participant('alice', anonymous=True, last_bill_result='')
        alice.set_tip_to('bob', 1)
        update_homepage_queries_once(self.db)
        actual = self.client.get('/').body
        expected = "Anonymous"
        assert expected in actual

    def test_profile(self):
        self.make_participant('cheese',
                              claimed_time=datetime.datetime.now(pytz.utc))
        expected = "I'm grateful for gifts"
        actual = self.get('/cheese/').decode('utf8')  # deal with cent sign
        assert expected in actual

    def test_widget(self):
        self.make_participant('cheese',
                              claimed_time=datetime.datetime.now(pytz.utc))
        expected = "javascript: window.open"
        actual = self.get('/cheese/widget.html')
        assert expected in actual

    def test_bank_account(self):
        expected = "add<br> or change your bank account"
        actual = self.get('/bank-account.html')
        assert expected in actual

    def test_credit_card(self):
        expected = "add<br> or change your credit card"
        actual = self.get('/credit-card.html')
        assert expected in actual

    def test_github_associate(self):
        expected = "Forbidden, program!"
        actual = self.get('/on/github/associate')
        assert expected in actual

    def test_twitter_associate(self):
        expected = "Forbidden, program!"
        actual = self.get('/on/twitter/associate')
        assert expected in actual

    def test_about(self):
        expected = "small weekly cash gifts"
        actual = self.get('/about/')
        assert expected in actual

    def test_about_stats(self):
        expected = "have joined Gittip"
        actual = self.get('/about/stats.html')
        assert expected in actual

    def test_about_charts(self):
        expected = "Money transferred"
        actual = self.get('/about/charts.html')
        assert expected in actual

    @patch('gittip.elsewhere.github.requests')
    def test_github_proxy(self, requests):
        requests.get().status_code = 200
        requests.get().text = GITHUB_USER_UNREGISTERED_LGTEST
        expected = "lgtest has not joined"
        actual = self.get('/on/github/lgtest/').decode('utf8')
        assert expected in actual

    # This hits the network. XXX add a knob to skip this
    def test_twitter_proxy(self):
        expected = "twitter has not joined"
        actual = self.get('/on/twitter/twitter/').decode('utf8')
        assert expected in actual

    def test_404(self):
        actual = self.get('/about/four-oh-four.html')
        assert "Page Not Found" in actual
        assert "{%" not in actual

    def test_bank_account_complete(self):
        expected = "Page Not Found"
        actual = self.get('/bank-account-complete.html')
        assert expected in actual

    def test_bank_account_json(self):
        expected = "Page Not Found"
        actual = self.get('/bank-account.json')
        assert expected in actual

    def test_credit_card_json(self):
        expected = "Page Not Found"
        actual = self.get('/credit-card.json')
        assert expected in actual
|
Python
| 0 |
@@ -322,16 +322,65 @@
 es_once\n
+from aspen.http.request import UnicodeWithParams\n
 \n\nclass
@@ -833,33 +833,72 @@
-TwitterAccount("bob"
+self.platforms.twitter.get_account(UnicodeWithParams('bob'
 , {})
+)
 .opt
|
39004c89a79b7932060e66ce3ad868156bdaf936
|
set default deepmind lab env to demo_map
|
tensorforce/examples/lab_main.py
|
tensorforce/examples/lab_main.py
|
# Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

"""
Deepmind lab execution
"""

from __future__ import absolute_import
from __future__ import print_function
from __future__ import division

import os
import argparse
import logging

import numpy as np

import deepmind_lab
from tensorforce.config import Config
from tensorforce.external.deepmind_lab import DeepMindLabEnvironment
from tensorforce.util.experiment_util import build_preprocessing_stack
from tensorforce.util.agent_util import create_agent
from tensorforce.util.config_util import log_levels
from tensorforce.execution import Runner


def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('level_id', default='',help="DeepMind Lab level id")
    parser.add_argument('-a', '--agent', default='DQNAgent')
    parser.add_argument('-c', '--agent-config', help="Agent configuration file")
    parser.add_argument('-n', '--network-config', help="Network configuration file")
    parser.add_argument('-e', '--episodes', type=int, default=50000, help="Number of episodes")
    parser.add_argument('-t', '--max-timesteps', type=int, default=2000, help="Maximum number of timesteps per episode")
    parser.add_argument('-m', '--monitor', help="Save results to this directory")
    parser.add_argument('-ms', '--monitor-safe', action='store_true', default=False, help="Do not overwrite previous results")
    parser.add_argument('-mv', '--monitor-video', type=int, default=0, help="Save video every x steps (0 = disabled)")
    parser.add_argument('-s', '--save', help="Save agent to this dir")
    parser.add_argument('-se', '--save-episodes', type=int, default=100, help="Save agent every x episodes")
    parser.add_argument('-l', '--load', help="Load agent from this dir")
    parser.add_argument('-D', '--debug', action='store_true', default=False, help="Show debug outputs")

    args = parser.parse_args()

    env = DeepMindLabEnvironment(args.level_id)

    config = Config({
        'repeat_actions': 1,
        'actions': env.actions,
        'action_shape': env.action_shape,
        'state_shape': env.state_shape,
        'max_episode_length': args.max_timesteps
    })

    if args.agent_config:
        config.read_json(args.agent_config)
    if args.network_config:
        config.read_json(args.network_config)

    logger = logging.getLogger(__name__)
    logger.setLevel(log_levels[config.loglevel])

    preprocessing_config = config.get('preprocessing')
    if preprocessing_config:
        stack = build_preprocessing_stack(preprocessing_config)
        config.state_shape = stack.shape(config.state_shape)
    else:
        stack = None

    if args.debug:
        logger.info("-" * 16)
        logger.info("File configuration:")
        logger.info(config)

    agent = create_agent(args.agent, config)

    if args.load:
        load_dir = os.path.dirname(args.load)
        if not os.path.isdir(load_dir):
            raise OSError("Could not load agent from {}: No such directory.".format(load_dir))
        agent.load_model(args.load)

    if args.debug:
        logger.info("-" * 16)
        logger.info("Agent configuration:")
        logger.info(agent.config)
        if agent.model:
            logger.info("Model configuration:")
            logger.info(agent.model.config)

    runner = Runner(agent, env, preprocessor=stack, repeat_actions=config.repeat_actions)

    if args.save:
        save_dir = os.path.dirname(args.save)
        if not os.path.isdir(save_dir):
            try:
                os.mkdir(save_dir, 0o755)
            except OSError:
                raise OSError("Cannot save agent to dir {} ()".format(save_dir))
        runner.save_model(args.save, args.save_episodes)

    report_episodes = args.episodes // 1000
    if args.debug:
        report_episodes = 10

    def episode_finished(r):
        if r.episode % report_episodes == 0:
            logger.info("Finished episode {ep} after {ts} timesteps".format(ep=r.episode + 1, ts=r.timestep + 1))
            logger.info("Episode reward: {}".format(r.episode_rewards[-1]))
            logger.info("Average of last 500 rewards: {}".format(np.mean(r.episode_rewards[-500:])))
            logger.info("Average of last 100 rewards: {}".format(np.mean(r.episode_rewards[-100:])))
        return True

    logger.info("Starting {agent} for Environment '{env}'".format(agent=agent, env=env))
    runner.run(args.episodes, args.max_timesteps, episode_finished=episode_finished)
    logger.info("Learning finished. Total episodes: {ep}".format(ep=runner.episode + 1))

    env.close()


if __name__ == '__main__':
    main()
|
Python
| 0 |
@@ -895,16 +895,17 @@
 ind_lab\n
+\n
 from ten
@@ -1319,16 +1319,30 @@
 efault='
+tests/demo_map
 ',help="
|
e6642dd9c9cad6aca3cb70e4cca53afe51494d4b
|
Add a test for checking setup.py
|
tests/test_setup.py
|
tests/test_setup.py
|
Python
| 0.000002 |
@@ -0,0 +1,121 @@
+r""" Testspectra_gen functions"""
+
+
+def test_setup():
+    import os
+    cmd = "python3 setup.py check"
+    os.system(cmd)
|
|
0424eb7dd8e55e2f88f088c3a84c8e962d89f06e
|
build perf from source
|
tools/perf_build.py
|
tools/perf_build.py
|
Python
| 0 |
@@ -0,0 +1,1170 @@
+#!/usr/bin/env python
+import platform
+import subprocess
+
+if subprocess.call('which sudo', shell=True) == 0:
+    with_sudo = 'sudo '
+else:
+    with_sudo = ''
+
+major = int(platform.release().split('.')[0])
+minor = int(platform.release().split('.')[1])
+revision = int(platform.release().split('.')[2].split('-')[0])
+url_kernel = 'https://cdn.kernel.org/pub/linux/kernel/v%d.x/linux-%d.%d.tar.gz' % (major, major, minor)
+tarfile = 'linux-%d.%d.tar.gz' % (major, minor)
+source_dir = 'linux-%d.%d' % (major, minor)
+print('URL: ', url_kernel)
+print('TarFile: ', tarfile)
+subprocess.call('rm -r %s' % (source_dir), shell=True)
+subprocess.call('rm %s' % (tarfile), shell=True)
+subprocess.call('wget %s' % (url_kernel) , shell=True)
+subprocess.call('tar xf %s && make -j -C %s/tools/perf' % (tarfile, source_dir) , shell=True)
+subprocess.call(with_sudo + 'cp %s/tools/perf/perf /usr/bin/' % (source_dir) , shell=True)
+subprocess.call('rm -r %s' % (source_dir), shell=True)
+subprocess.call('rm %s' % (tarfile), shell=True)
+subprocess.call('ls -lah /usr/bin/perf', shell=True)
+#get kernelversion
+#wget http://www.kernel.org/pub/linux/kernel/v2.6/testing/linux-2.6.33-rc3.tar.bz2
|
|
ef3e07794d4245b9d4a1d0007a0b9099d5bafaf9
|
Add asteval wrapper
|
project/asteval_wrapper.py
|
project/asteval_wrapper.py
|
Python
| 0.000001 |
@@ -0,0 +1,1694 @@
+from asteval import Interpreter
+
+import functools
+import re
+
+
+class Script(object):
+    def __init__(self):
+        """
+        Sets up an interpreter.
+        """
+        self.interpreter = Interpreter()
+        self.symtable['re'] = re
+
+    @property
+    def symtable(self):
+        """
+        Expose the internal symbol table.
+        """
+        return self.interpreter.symtable
+
+    @symtable.setter
+    def symtable(self, symtable):
+        """
+        Apply changes to the internal symbol table.
+        """
+        self.interpreter.symtable = symtable
+
+    def add_file(self, path):
+        """
+        Adds and loads code from a script file.
+        """
+        with open(path, 'rb') as f:
+            self.interpreter(f.read())
+
+    def invoke(self, name, *args, **kwargs):
+        """
+        Invokes a function in the script with the appropriate arguments.
+        """
+        f = self.interpreter.symtable.get(name, None)
+
+        if not callable(f):
+            return
+
+        return f(*args, **kwargs)
+
+    def __getattr__(self, name):
+        """
+        Returns the function to invoke a function in the script, if a function
+        with that name exists within the symbol table. Otherwise, an attribute
+        error is being raised (default behaviour).
+        """
+        if name in ['symtable', 'interpreter']:
+            raise AttributeError("{} instance has no attribute '{}'".format(
+                self.__class__.__name__, name))
+
+        if not callable(self.symtable.get(name, None)):
+            raise AttributeError("{} instance has no attribute '{}'".format(
+                self.__class__.__name__, name))
+
+        return functools.partial(self.invoke, name)
|
|
126863fd6c2a13491b92d546d3e886d0e0da492b
|
Add experiment for nodejs.
|
swig/node/binding.gyp
|
swig/node/binding.gyp
|
Python
| 0 |
@@ -0,0 +1,1038 @@
+{
+  "targets": [
+    {
+      "target_name": "velocypack",
+      "sources": [ "../../src/asm-functions.cpp",
+                   "../../src/AttributeTranslator.cpp",
+                   "../../src/Builder.cpp",
+                   "../../src/Collection.cpp",
+                   "../../src/Dumper.cpp",
+                   "../../src/Exception.cpp",
+                   "../../src/fasthash.cpp",
+                   "../../src/fpconv.cpp",
+                   "../../src/HexDump.cpp",
+                   "../../src/Iterator.cpp",
+                   "../../src/Options.cpp",
+                   "../../src/Parser.cpp",
+                   "../../src/Slice.cpp",
+                   "../../src/ValueType.cpp",
+                   "../../src/velocypack-common.cpp",
+                   "../../src/Version.cpp",
+                   "velocypack_wrap.cxx" ],
+      "include_dirs": [ "../../include", "../../src", "/usr/local/node-v5.0.0-linux-x64/include/node" ],
+      "cflags!": [ "-fno-exceptions" ],
+      "cflags_cc!": [ "-fno-exceptions" ]
+    }
+  ]
+}
|
|
ad0a1bf70dc2776c88115389400fd6958e49ecc8
|
Add rsync package
|
var/spack/packages/rsync/package.py
|
var/spack/packages/rsync/package.py
|
Python
| 0.000001 |
@@ -0,0 +1,448 @@
+from spack import *
+
+class Rsync(Package):
+    """rsync is an open source utility that provides fast incremental file transfer."""
+    homepage = "https://rsync.samba.org"
+    url = "https://download.samba.org/pub/rsync/rsync-3.1.1.tar.gz"
+
+    version('3.1.1', '43bd6676f0b404326eee2d63be3cdcfe')
+
+    # depends_on("foo")
+
+    def install(self, spec, prefix):
+        configure('--prefix=%s' % prefix)
+
+        make()
+        make("install")
|
|
d205284e21f5fad8195d796ad356042cb5c47894
|
add log test
|
py_logging/test_logging.py
|
py_logging/test_logging.py
|
Python
| 0.000001 |
@@ -0,0 +1,1544 @@
+#!/usr/bin/env python
+# encoding: utf-8
+
+import logging
+import os
+import time
+from unittest import TestCase
+
+
+class TestLogging(TestCase):
+    def setUp(self):
+        dir_path = os.path.dirname(__file__)
+        self.logfile = os.path.join(dir_path, "tmp.log")
+        self.logger = logging.getLogger(
+            "test_logger_%s" % int(time.time() * 1000))
+
+    def tearDown(self):
+        if os.path.exists(self.logfile):
+            os.remove(self.logfile)
+
+    def log_lines(self):
+        with open(self.logfile, "rt") as fp:
+            return [l.strip() for l in fp]
+
+    def test_logger(self):
+        self.assertEqual(self.logger.level, logging.NOTSET)
+
+    def test_filehandler(self):
+        filehdr = logging.FileHandler(self.logfile)
+        self.logger.addHandler(filehdr)
+        self.logger.setLevel(logging.INFO)
+
+        self.logger.debug("debug")
+        self.logger.info("info")
+        self.logger.warning("warning")
+        self.logger.error("error")
+        self.logger.critical("critical")
+
+        self.assertListEqual(self.log_lines(), [
+            "info", "warning", "error", "critical"])
+
+    def test_format(self):
+        filehdr = logging.FileHandler(self.logfile)
+        logfmt = logging.Formatter("test: %(name)s %(levelname)-8s %(message)s")
+        filehdr.setFormatter(logfmt)
+        self.logger.addHandler(filehdr)
+        self.logger.setLevel(logging.INFO)
+
+        self.logger.info("info")
+
+        self.assertListEqual(self.log_lines(), [
+            "test: %s INFO     info" % (self.logger.name,)])
|
|
2e2bae00f7b098e5fd20f2901b4f70554e250d2d
|
add program to plot offset distribution
|
python/plot_offset_dist.py
|
python/plot_offset_dist.py
|
Python
| 0 |
@@ -0,0 +1,2516 @@
+#!/usr/bin/env python
+import argparse
+
+import numpy as np
+
+import glob
+
+import matplotlib as mpl
+mpl.use('Agg')
+mpl.rcParams.update({'font.size': 10})
+import matplotlib.pyplot as plt
+
+def add_stat_legend(x):
+    textstr = '$\mathrm{N}=%d$\n$\mathrm{mean}=%.2f$\n$\mathrm{median}=%.2f$\n$\mathrm{std}=%.2f$' % (
+        len(x), np.nanmean(x), np.nanmedian(x), np.nanstd(x))
+    props = dict(boxstyle='round', facecolor='white')
+    plt.text(0.95, 0.95, textstr, transform=plt.gca().transAxes, va='top', ha='right', bbox=props)
+
+def main():
+    # parse command-line arguments
+    parser = argparse.ArgumentParser(
+        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument("--verbose", action="store_true",
+        help="print verbose output")
+    parser.add_argument("-o", "--output", type=str, default=None,
+        help="output file base name")
+    parser.add_argument("-i", "--input", type=str, default=None,
+        help="required input file")
+    args = parser.parse_args()
+
+    nfiles = len(filenames)
+
+    # the first is the fiberid and the next two columns are xfocal and yfocal positions of the target
+    nidtokens = 3
+    # the rest are the tabulated throughput correction values
+    npoints = 71
+
+    # the throughput correction vectors span the range 3500A to 10500A
+    xvalues = np.linspace(3500, 10500, npoints, endpoint=True)
+
+    offset_dict = {}
+    for x in xvalues:
+        offset_dict[x] = []
+
+    offsets = []
+
+    for i,filename in enumerate(filenames):
+        plate, mjd = filename.split('.')[0].split('-')[-2:]
+        data = np.loadtxt(filename, ndmin=2)
+
+        nentries, ntokens = data.shape
+
+        assert ntokens == 3*npoints + nidtokens
+
+        for row in data:
+            fiberid, xfocal, yfocal = row[0:nidtokens]
+            offset = row[nidtokens+0::3]
+            fiber_fraction = row[nidtokens+1::3]
+            tpcorr = row[nidtokens+2::3]
+
+            offsets.append(offsets)
+
+    offsets_array = np.vstack(offsets)
+
+    for i,x in enumerate(xvalues):
+        offsets_wave_slice = offsets_array[:,i]
+
+        fig = plt.figure(figsize=(8,6))
+        plt.hist(offsets_wave_slice, bins=50, histtype='stepfilled', alpha=0.5)
+        plt.xlabel('Centroid offset (arcseconds)')
+        plt.ylabel('Counts')
+        plt.title(r%'$\lambda = %s$' % x)
+        plt.xlim([0, 2])
+
+        add_stat_legend(offsets_wave_slice)
+
+        plt.grid(True)
+
+        fig.savefig(args.output+'-%s.png'%x, bbox_inches='tight')
+
+if __name__ == '__main__':
+    main()
|
|
091432b795e3b5571887eb924fb831060d2fd53b
|
Add logging setup
|
turbinia/config/logger.py
|
turbinia/config/logger.py
|
Python
| 0.000001 |
@@ -0,0 +1,1371 @@
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Sets up logging."""
+
+import logging
+
+from turbinia import config
+
+def setup(root=False):
+  """Set up logging parameters."""
+  config.LoadConfig()
+  log = logging.getLogger('turbinia')
+
+  fh = logging.FileHandler(config.LOG_FILE)
+  formatter = logging.Formatter(u'%(asctime)s:%(levelname)s:%(message)s')
+  fh.setFormatter(formatter)
+  fh.setLevel(logging.DEBUG)
+
+  ch = logging.StreamHandler()
+  formatter = logging.Formatter(u'[%(levelname)s] %(message)s')
+  ch.setFormatter(formatter)
+
+  log.addHandler(fh)
+  log.addHandler(ch)
+
+  # Optionally configure the root logger because other modules like PSQ use
+  # this, and we want to see log messages from it when executing from CLI.
+  if root:
+    root_log = logging.getLogger()
+    root_log.addHandler(ch)
+    root_log.setLevel(logging.DEBUG)
|
|
f9998701bafa24fce25156751fefdfa97074c801
|
Add protocol conformance map
|
utils/gyb_syntax_support/protocolsMap.py
|
utils/gyb_syntax_support/protocolsMap.py
|
Python
| 0 |
@@ -0,0 +1,541 @@
+SYNTAX_BUILDABLE_EXPRESSIBLE_AS_CONFORMANCES = {
+    'ExpressibleAsConditionElement': [
+        'ExpressibleAsConditionElementList'
+    ],
+    'ExpressibleAsDeclBuildable': [
+        'ExpressibleAsCodeBlockItem',
+        'ExpressibleAsMemberDeclListItem',
+        'ExpressibleAsSyntaxBuildable'
+    ],
+    'ExpressibleAsStmtBuildable': [
+        'ExpressibleAsCodeBlockItem',
+        'ExpressibleAsSyntaxBuildable'
+    ],
+    'ExpressibleAsExprList': [
+        'ExpressibleAsConditionElement',
+        'ExpressibleAsSyntaxBuildable'
+    ]
+}
|
|
143eb4665e76065ec67b5dd42cfe84e238d50094
|
use per post winner count if available to overide settings
|
candidates/constants.py
|
candidates/constants.py
|
Python
| 0 |
@@ -0,0 +1,81 @@
+ELECTION_ID_REGEX = r'(?P<election>[^/]+)'
+POST_ID_REGEX = r'(?P<post_id>[^/]+)'
|
|
6aef9ab419b09822b2255141349144ac8978e862
|
Add migration for h5p kind.
|
kolibri/core/content/migrations/0025_add_h5p_kind.py
|
kolibri/core/content/migrations/0025_add_h5p_kind.py
|
Python
| 0 |
@@ -0,0 +1,925 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.23 on 2019-12-19 02:29
+from __future__ import unicode_literals
+
+from django.db import migrations
+from django.db import models
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ("content", "0024_channelmetadata_public"),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name="contentnode",
+            name="kind",
+            field=models.CharField(
+                blank=True,
+                choices=[
+                    ("topic", "Topic"),
+                    ("video", "Video"),
+                    ("audio", "Audio"),
+                    ("exercise", "Exercise"),
+                    ("document", "Document"),
+                    ("html5", "HTML5 App"),
+                    ("slideshow", "Slideshow"),
+                    ("h5p", "H5P"),
+                ],
+                max_length=200,
+            ),
+        ),
+    ]
|
|
39b00572d7888895bcf552999f80b712c1738142
|
Create BillboardIter.py
|
BillboardIter.py
|
BillboardIter.py
|
Python
| 0 |
@@ -0,0 +1,2374 @@
+from datetime import date, timedelta
+
+
+class BillboardDates():
+
+    '''Iterator over valid Billboard Chart weeks, which is
+    supposed to be a per-class singleton for start quantization'''
+
+    def __init__(self, endDate=date.today()):
+        assert type(endDate) is str or type(endDate) is date
+        self.endDate = endDate
+        if type(endDate) is not date:
+            self.endDate = self.str_to_date(endDate)
+        self.currentDate = date(1958, 8, 9)
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        if self.compare_dates(self.endDate) >= 0:
+            raise StopIteration
+        current = self.currentDate
+        self.increment()
+        return current
+
+    def str_to_date(self, string):
+        year, month, day = string.split('-')
+        return date(int(year), int(month), int(day))
+
+    def increment(self, days=7):
+        '''Serves as an abstraction barrier'''
+        self.currentDate = self.currentDate + timedelta(days)
+
+    def __repr__(self):
+        return str(self.currentDate)
+
+    def compare_dates(self, dateObj):
+        '''Returns 1 if current date is larger, 0 if equal, -1 if smaller'''
+        # check year first
+        if self.currentDate > dateObj:
+            return 1
+        elif self.currentDate < dateObj:
+            return -1
+        return 0  # if they are equal
+
+
+class BillboardIter(BillboardDates):
+
+    '''Iterator over valid Billboard Chart weeks, which
+    quantizes the start to the next valid date'''
+    _BillboardDates = BillboardDates()
+
+    def __init__(self, startDate, endDate=date.today()):
+        assert type(startDate) is str or type(startDate) is date
+        super().__init__(endDate)
+        self.initDate = startDate
+        if type(self.initDate) is not date:
+            self.initDate = self.str_to_date(self.initDate)
+        self.currentDate = self.initDate
+        self.quantizeStart()
+
+    def reset(self):
+        self.currentDate = self.initDate
+        self.quantizeStart()
+
+    def quantizeStart(self):
+        '''Quantizes starting date to the closest following Billboard chart'''
+        bbDate = self._BillboardDates.currentDate
+        while self.compare_dates(bbDate) >= 0:  # get BB date up to start
+            bbDate = next(self._BillboardDates)
+        while self.compare_dates(bbDate) < 0:  # get start up to valid BB date
+            self.increment(1)
|
|
566850c873f6bdbed6632388330f8e4df6fbe613
|
add migration for accordeon block on homepage
|
meinberlin/apps/cms/migrations/0021_add_accordeon_block.py
|
meinberlin/apps/cms/migrations/0021_add_accordeon_block.py
|
Python
| 0 |
@@ -0,0 +1,2269 @@
+# -*- coding: utf-8 -*-
+# Generated by Django 1.11.16 on 2018-11-08 12:57
+from __future__ import unicode_literals
+
+from django.db import migrations
+import meinberlin.apps.cms.blocks
+import wagtail.wagtailcore.blocks
+import wagtail.wagtailcore.fields
+import wagtail.wagtailimages.blocks
+
+
+class Migration(migrations.Migration):
+
+    dependencies = [
+        ('meinberlin_cms', '0020_add_header_block'),
+    ]
+
+    operations = [
+        migrations.AlterField(
+            model_name='homepage',
+            name='body',
+            field=wagtail.wagtailcore.fields.StreamField((('paragraph', wagtail.wagtailcore.blocks.RichTextBlock(template='meinberlin_cms/blocks/richtext_block.html')), ('call_to_action', wagtail.wagtailcore.blocks.StructBlock((('body', wagtail.wagtailcore.blocks.RichTextBlock()), ('link', wagtail.wagtailcore.blocks.CharBlock()), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=50))))), ('image_call_to_action', wagtail.wagtailcore.blocks.StructBlock((('image', wagtail.wagtailimages.blocks.ImageChooserBlock()), ('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80)), ('body', wagtail.wagtailcore.blocks.RichTextBlock()), ('link', wagtail.wagtailcore.blocks.CharBlock()), ('link_text', wagtail.wagtailcore.blocks.CharBlock(label='Link Text', max_length=50))))), ('columns_text', wagtail.wagtailcore.blocks.StructBlock((('columns_count', wagtail.wagtailcore.blocks.ChoiceBlock(choices=[(2, 'Two columns'), (3, 'Three columns'), (4, 'Four columns')])), ('columns', wagtail.wagtailcore.blocks.ListBlock(wagtail.wagtailcore.blocks.RichTextBlock(label='Column body')))))), ('projects', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock(max_length=80)), ('projects', wagtail.wagtailcore.blocks.ListBlock(meinberlin.apps.cms.blocks.ProjectSelectionBlock(label='Project')))))), ('activities', wagtail.wagtailcore.blocks.StructBlock((('heading', wagtail.wagtailcore.blocks.CharBlock(label='Heading')), ('count', wagtail.wagtailcore.blocks.IntegerBlock(default=5, label='Count'))))), ('accordion', wagtail.wagtailcore.blocks.StructBlock((('title', wagtail.wagtailcore.blocks.CharBlock()), ('body', wagtail.wagtailcore.blocks.RichTextBlock(required=False))))))),
+        ),
+    ]
|
|
be898f26984da4ee92da1a027e47775cd816fed5
|
Fix typo (?) in authentication logic
|
nova/api/openstack/auth.py
|
nova/api/openstack/auth.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.import datetime

import datetime
import hashlib
import json
import time

import logging
import webob.exc
import webob.dec

from nova import auth
from nova import context
from nova import db
from nova import flags
from nova import manager
from nova import utils
from nova import wsgi
from nova.api.openstack import faults

FLAGS = flags.FLAGS


class AuthMiddleware(wsgi.Middleware):
    """Authorize the openstack API request or return an HTTP Forbidden."""

    def __init__(self, application, db_driver=None):
        if not db_driver:
            db_driver = FLAGS.db_driver
        self.db = utils.import_object(db_driver)
        self.auth = auth.manager.AuthManager()
        super(AuthMiddleware, self).__init__(application)

    @webob.dec.wsgify
    def __call__(self, req):
        if not self.has_authentication(req):
            return self.authenticate(req)

        user = self.get_user_by_authentication(req)

        if not user:
            return faults.Fault(webob.exc.HTTPUnauthorized())

        project = self.auth.get_project(FLAGS.default_project)
        req.environ['nova.context'] = context.RequestContext(user, project)
        return self.application

    def has_authentication(self, req):
        return 'X-Auth-Token' in req.headers

    def get_user_by_authentication(self, req):
        return self.authorize_token(req.headers["X-Auth-Token"])

    def authenticate(self, req):
        # Unless the request is explicitly made against /<version>/ don't
        # honor it
        path_info = req.path_info
        if len(path_info) > 1:
            return faults.Fault(webob.exc.HTTPUnauthorized())

        try:
            username = req.headers['X-Auth-User']
            key = req.headers['X-Auth-Key']
        except KeyError:
            return faults.Fault(webob.exc.HTTPUnauthorized())

        token, user = self._authorize_user(username, key, req)
        if user and token:
            res = webob.Response()
            res.headers['X-Auth-Token'] = token.token_hash
            res.headers['X-Server-Management-Url'] = \
                token.server_management_url
            res.headers['X-Storage-Url'] = token.storage_url
            res.headers['X-CDN-Management-Url'] = token.cdn_management_url
            res.content_type = 'text/plain'
            res.status = '204'
            return res
        else:
            return faults.Fault(webob.exc.HTTPUnauthorized())

    def authorize_token(self, token_hash):
        """ retrieves user information from the datastore given a token

        If the token has expired, returns None
        If the token is not found, returns None
        Otherwise returns dict(id=(the authorized user's id))

        This method will also remove the token if the timestamp is older than
        2 days ago.
        """
        ctxt = context.get_admin_context()
        token = self.db.auth_get_token(ctxt, token_hash)
        if token:
            delta = datetime.datetime.now() - token.created_at
            if delta.days >= 2:
                self.db.auth_destroy_token(ctxt, token)
            else:
                return self.auth.get_user(token.user_id)
        return None

    def _authorize_user(self, username, key, req):
        """Generates a new token and assigns it to a user.

        username - string
        key - string API key
        req - webob.Request object
        """
        ctxt = context.get_admin_context()
        user = self.auth.get_user_from_access_key(key)
        if user and user.name == username:
            token_hash = hashlib.sha1('%s%s%f' % (username, key,
                time.time())).hexdigest()
            token_dict = {}
            token_dict['token_hash'] = token_hash
            token_dict['cdn_management_url'] = ''
            # Same as auth url, e.g. http://foo.org:8774/baz/v1.0
            token_dict['server_management_url'] = req.url
            token_dict['storage_url'] = ''
            token_dict['user_id'] = user.id
            token = self.db.auth_create_token(ctxt, token_dict)
            return token, user
        return None, None
|
Python
| 0.026024 |
@@ -4143,19 +4143,24 @@
ess_key(
-key
+username
)%0A
@@ -4182,24 +4182,21 @@
ser.
-name == username
+secret == key
:%0A
|
ecfadf8478b8775d8579812a7bd835f6ebb1ffd4
|
Add file lister for rclone export
|
util/rclone-list-files.py
|
util/rclone-list-files.py
|
Python
| 0 |
@@ -0,0 +1,627 @@
+#!/usr/bin/env python3%0Aimport glob%0A%0A# For use with --files-from argument for Rclone%0A# This suits Edgar's structure with is%0A# SPECIESNAME/%7Boccurrences%7Cprojected-distributions%7D/%5B2nd-to-latest-file-is-the-latest%5D.zip%0Afor folder in glob.glob('*'):%0A occurrences = glob.glob(folder + '/occurrences/*')%0A projected_distributions = glob.glob(folder + '/projected-distributions/*')%0A if not 'latest' in occurrences%5B-1%5D and not 'latest' in projected_distributions%5B-1%5D:%0A print(f'No latest in %7Bfolder%7D!')%0A exit(1)%0A%0A print(folder + '/metadata.json')%0A print(occurrences%5B-2%5D)%0A print(projected_distributions%5B-2%5D)%0A
|
|
47e3a59dd05f30f1ce0c41e0aa531987fb33606c
|
Generate watersheds for new EM dataset
|
new-em-watersheds.py
|
new-em-watersheds.py
|
Python
| 0.999865 |
@@ -0,0 +1,783 @@
+from gala import morpho%0Afrom gala import imio%0Aimport numpy as np%0Apr = imio.read_image_stack('membrane/*.tiff')%0Aws = morpho.watershed_sequence(pr / pr.max(), axis=0, connectivity=2, smooth_thresh=0.02, minimum_seed_size=2)%0Aimio.write_h5_stack(ws, 'watershed.lzf.h5', compression='lzf')%0Aslices = %5B(slice(None), slice(None, 625), slice(None, 625)),%0A (slice(None), slice(None, 625), slice(625, None)),%0A (slice(None), slice(625, None), slice(None, 625)),%0A (slice(None), slice(625, None), slice(625, None))%5D%0Awss = %5Bws%5Bs%5D for s in slices%5D%0Afrom skimage.measure import label%0Afor i, vol in enumerate(wss):%0A fn = 'watershed-%25i.lzf.h5' %25 i%0A vol_relabel = label(vol)%0A print(np.max(vol_relabel))%0A imio.write_h5_stack(vol_relabel, fn, compression='lzf')%0A %0A
|
|
629bd006bfd7e6210dcc95198be9b65614e4f051
|
Convert optimization_test.py to PyTorch
|
optimization_test_pytorch.py
|
optimization_test_pytorch.py
|
Python
| 0.999999 |
@@ -0,0 +1,1731 @@
+# coding=utf-8%0A# Copyright 2018 The Google AI Language Team Authors.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0Aimport optimization_pytorch as optimization%0Aimport torch%0Aimport unittest%0A%0A%0Aclass OptimizationTest(unittest.TestCase):%0A%0A def assertListAlmostEqual(self, list1, list2, tol):%0A self.assertEqual(len(list1), len(list2))%0A for a, b in zip(list1, list2):%0A self.assertAlmostEqual(a, b, delta=tol)%0A%0A def test_adam(self):%0A w = torch.tensor(%5B0.1, -0.2, -0.1%5D, requires_grad=True)%0A x = torch.tensor(%5B0.4, 0.2, -0.5%5D)%0A criterion = torch.nn.MSELoss(reduction='elementwise_mean')%0A optimizer = optimization.BERTAdam(params=%7Bw%7D, lr=0.2, schedule='warmup_linear', warmup=0.1, t_total=100)%0A for _ in range(100):%0A # TODO Solve: reduction='elementwise_mean'=True not taken into account so division by x.size(0) is necessary%0A loss = criterion(x, w) / x.size(0)%0A loss.backward()%0A optimizer.step()%0A self.assertListAlmostEqual(w.tolist(), %5B0.4, 0.2, -0.5%5D, tol=1e-2)%0A%0A%0Aif __name__ == %22__main__%22:%0A unittest.main()%0A
|
|
7da53597f9cb4117cecbaed1dbb77f4693289815
|
add a test for well locations endpoint
|
app/backend/wells/tests/test_wells.py
|
app/backend/wells/tests/test_wells.py
|
Python
| 0.000001 |
@@ -0,0 +1,999 @@
+%22%22%22%0A Licensed under the Apache License, Version 2.0 (the %22License%22);%0A you may not use this file except in compliance with the License.%0A You may obtain a copy of the License at%0A%0A http://www.apache.org/licenses/LICENSE-2.0%0A%0A Unless required by applicable law or agreed to in writing, software%0A distributed under the License is distributed on an %22AS IS%22 BASIS,%0A WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A See the License for the specific language governing permissions and%0A limitations under the License.%0A%22%22%22%0A%0Afrom django.urls import reverse%0A%0Afrom rest_framework.test import APITestCase%0Afrom rest_framework import status%0A%0A%0Aclass TestWellLocationsSearch(APITestCase):%0A%0A def test_well_locations(self):%0A # Basic test to ensure that the well location search returns a non-error response%0A url = reverse('well-locations')%0A response = self.client.get(url)%0A self.assertEqual(response.status_code, status.HTTP_200_OK)%0A
|
|
32f7fe6562f4d1592dfab5a9b065154dca51f1d3
|
Add rsync module
|
pyIsis/rsync.py
|
pyIsis/rsync.py
|
Python
| 0.000001 |
@@ -0,0 +1,1794 @@
+# -*- coding: utf-8 -*-%0A%0Aimport os%0Aimport subprocess%0Aimport logging%0A%0A%0A#RSYNC_PATH = os.path.join(%0A# os.path.abspath (os.path.dirname(__file__)), 'bin', 'rsync')%0ARSYNC_PATH = '/opt/rsync/bin/rsync'%0ARSYNC_CMD = '%7Bcmd%7D %7Boptions%7D %22%7Bsource%7D%22 %22%7Bdestination%7D%22'%0A%0Arsync_logger = logging.getLogger('avidisis')%0A%0A%0Aclass rsync(object):%0A %22%22%22%0A Run rsync as a subprocess sending output to a logger.%0A This class subclasses subprocess.Popen%0A %22%22%22%0A%0A def __init__(self, src, dst, *options):%0A self.src = src%0A self.dst = dst%0A self.options = options%0A rsync_logger.debug('rsync parameters: %7B%7D %7B%7D'.format(src, dst))%0A%0A def run(self):%0A cmd = RSYNC_CMD.format(%0A cmd=RSYNC_PATH,%0A options= ' '.join(self.options),%0A source=self.src,%0A destination=self.dst)%0A%0A process = subprocess.Popen(%0A cmd, shell=True,%0A stdout=subprocess.PIPE,%0A stderr=subprocess.STDOUT)%0A%0A output = ''%0A # Poll process for new output until finished%0A for line in iter(process.stdout.readline, %22%22):%0A rsync_logger.debug('------ %7B%7D'.format(line.strip('%5Cn%5Cr')))%0A #print '------ %7B%7D'.format(line.strip('%5Cn%5Cr'))%0A output += line%0A%0A process.wait()%0A exitCode = process.returncode%0A%0A if (exitCode == 0):%0A rsync_logger.info('Workspace %5B%7B%7D%5D backup done.'.format(%0A os.path.basename(self.src)))%0A return output%0A else:%0A rsync_logger.error('rsync exitCode: %7B%7D, ouput %7B%7D'.format(%0A exitCode, output))%0A raise Exception(cmd, exitCode, output)%0A%0A%0Aif __name__ == %22__main__%22:%0A r = rsync('/tmp/test/', '/tmp/test2', '-av', '--delete', '--exclude=%22*.log%22')%0A out = r.run()%0A print out%0A%0A%0A
|
|
49712dd43a2376c913e66cac7b52fc7247912e44
|
Make disable_builtins schema a property
|
flexget/plugins/operate/disable_builtins.py
|
flexget/plugins/operate/disable_builtins.py
|
from __future__ import unicode_literals, division, absolute_import
import logging
from flexget import plugin
from flexget.plugin import priority, register_plugin, plugins
log = logging.getLogger('builtins')
def all_builtins():
"""Helper function to return an iterator over all builtin plugins."""
return (plugin for plugin in plugins.itervalues() if plugin.builtin)
class PluginDisableBuiltins(object):
"""Disables all (or specific) builtin plugins from a task."""
def __init__(self):
# cannot trust that on_task_start would have been executed
self.disabled = []
# TODO: Shit, how was this ever working? If this plugin is loaded before any builtin plugins, they are not allowed
# in the schema.
schema = {
'oneOf': [
{'type': 'boolean'},
{'type': 'array', 'items': {'type': 'string', 'enum': [p.name for p in all_builtins()]}}
]
}
def debug(self):
log.debug('Builtin plugins: %s' % ', '.join(plugin.name for plugin in all_builtins()))
@priority(255)
def on_task_start(self, task, config):
self.disabled = []
if not config:
return
for plugin in all_builtins():
if config is True or plugin.name in config:
plugin.builtin = False
self.disabled.append(plugin.name)
log.debug('Disabled builtin plugin(s): %s' % ', '.join(self.disabled))
@priority(-255)
def on_task_exit(self, task, config):
if not self.disabled:
return
for name in self.disabled:
plugin.plugins[name].builtin = True
log.debug('Enabled builtin plugin(s): %s' % ', '.join(self.disabled))
self.disabled = []
on_task_abort = on_task_exit
register_plugin(PluginDisableBuiltins, 'disable_builtins', api_ver=2)
|
Python
| 0.000003 |
@@ -738,24 +738,154 @@
ema.
-%0A schema = %7B%0A
+ We need to change plugin loading to not access the schema until all plugins are loaded.%0A @property%0A def schema(self):%0A return %7B%0A
@@ -903,32 +903,36 @@
: %5B%0A
+
+
%7B'type': 'boolea
@@ -936,16 +936,20 @@
lean'%7D,%0A
+
@@ -1049,18 +1049,26 @@
-%5D%0A
+ %5D%0A
%7D%0A%0A
|
902953d99259b621b0ca2f69d17bd5563b3defbc
|
Fix bugs in mopidy.models revealed by new tests
|
mopidy/models.py
|
mopidy/models.py
|
from copy import copy
class Artist(object):
"""
:param uri: artist URI
:type uri: string
:param name: artist name
:type name: string
"""
def __init__(self, uri=None, name=None):
self._uri = None
self._name = name
@property
def uri(self):
"""The artist URI. Read-only."""
return self._uri
@property
def name(self):
"""The artist name. Read-only."""
return self._name
class Album(object):
"""
:param uri: album URI
:type uri: string
:param name: album name
:type name: string
:param artists: album artists
:type artists: list of :class:`Artist`
:param num_tracks: number of tracks in album
:type num_tracks: integer
"""
def __init__(self, uri=None, name=None, artists=None, num_tracks=0):
self._uri = uri
self._name = name
self._artists = artists or []
self._num_tracks = num_tracks
@property
def uri(self):
"""The album URI. Read-only."""
return self._uri
@property
def name(self):
"""The album name. Read-only."""
return self._name
@property
def artists(self):
"""List of :class:`Artist` elements. Read-only."""
return copy(self._artists)
@property
def num_tracks(self):
"""The number of tracks in the album. Read-only."""
return self._num_tracks
class Track(object):
"""
:param uri: track URI
:type uri: string
:param title: track title
:type title: string
:param artists: track artists
:type artists: list of :class:`Artist`
:param album: track album
:type album: :class:`Album`
:param track_no: track number in album
:type track_no: integer
:param date: track release date
:type date: :class:`datetime.date`
:param length: track length in milliseconds
:type length: integer
:param bitrate: bitrate in kbit/s
:type bitrate: integer
:param id: track ID (unique and non-changing as long as the process lives)
:type id: integer
"""
def __init__(self, uri=None, title=None, artists=None, album=None,
track_no=0, date=None, length=None, bitrate=None, id=None):
self._uri = uri
self._title = title
self._artists = artists or []
self._album = album
self._track_no = track_no
self._date = date
self._length = length
self._bitrate = bitrate
self._id = id
@property
def uri(self):
"""The track URI. Read-only."""
return self._uri
@property
def title(self):
"""The track title. Read-only."""
return self._title
@property
def artists(self):
"""List of :class:`Artist`. Read-only."""
return copy(self._artists)
@property
def album(self):
"""The track :class:`Album`. Read-only."""
return self._album
@property
def track_no(self):
"""The track number in album. Read-only."""
return self._track_no
@property
def date(self):
"""The track release date. Read-only."""
return self._date
@property
def length(self):
"""The track length in milliseconds. Read-only."""
return self._length
@property
def bitrate(self):
"""The track's bitrate in kbit/s. Read-only."""
return self._bitrate
@property
def id(self):
"""The track ID. Read-only."""
return self._id
def mpd_format(self, position=0):
"""
Format track for output to MPD client.
:param position: track's position in playlist
:type position: integer
:rtype: list of two-tuples
"""
return [
('file', self.uri or ''),
('Time', self.length and (self.length // 1000) or 0),
('Artist', self.mpd_format_artists()),
('Title', self.title or ''),
('Album', self.album and self.album.name or ''),
('Track', '%d/%d' % (self.track_no,
self.album and self.album.num_tracks or 0)),
('Date', self.date or ''),
('Pos', position),
('Id', self.id or position),
]
def mpd_format_artists(self):
"""
Format track artists for output to MPD client.
:rtype: string
"""
return u', '.join([a.name for a in self.artists])
class Playlist(object):
"""
:param uri: playlist URI
:type uri: string
:param name: playlist name
:type name: string
:param tracks: playlist's tracks
:type tracks: list of :class:`Track` elements
"""
def __init__(self, uri=None, name=None, tracks=None):
self._uri = uri
self._name = name
self._tracks = tracks or []
@property
def uri(self):
"""The playlist URI. Read-only."""
return self._uri
@property
def name(self):
"""The playlist name. Read-only."""
return self._name
@property
def tracks(self):
"""List of :class:`Track` elements. Read-only."""
return copy(self._tracks)
@property
def length(self):
"""The number of tracks in the playlist. Read-only."""
return len(self._tracks)
def mpd_format(self, start=0, end=None):
"""
Format playlist for output to MPD client.
Optionally limit output to the slice ``[start:end]`` of the playlist.
:param start: position of first track to include in output
:type start: int
:param end: position after last track to include in output
:type end: int or :class:`None` for end of list
:rtype: list of lists of two-tuples
"""
if end is None:
end = self.length
tracks = []
for track, position in zip(self.tracks, range(start, end)):
tracks.append(track.mpd_format(position))
return tracks
def with_(self, uri=None, name=None, tracks=None):
"""
Create a new playlist object with the given values. The values that are
not given are taken from the object the method is called on.
Does not change the object on which it is called.
:param uri: playlist URI
:type uri: string
:param name: playlist name
:type name: string
:param tracks: playlist's tracks
:type tracks: list of :class:`Track` elements
:rtype: :class:`Playlist`
"""
if uri is None:
uri = self.uri
if name is None:
name = self.name
if tracks is None:
tracks = self.tracks
return Playlist(uri=uri, name=name, tracks=tracks)
|
Python
| 0 |
@@ -221,20 +221,19 @@
._uri =
-None
+uri
%0A
@@ -5884,16 +5884,27 @@
f.tracks
+%5Bstart:end%5D
, range(
|
790be842b1c2e752210d5328dad05acb05d337bb
|
add minimal test for serial.threaded
|
test/test_threaded.py
|
test/test_threaded.py
|
Python
| 0.000015 |
@@ -0,0 +1,1406 @@
+#!/usr/bin/env python%0A#%0A# This file is part of pySerial - Cross platform serial port support for Python%0A# (C) 2016 Chris Liechti %[email protected]%3E%0A#%0A# SPDX-License-Identifier: BSD-3-Clause%0A%22%22%22%5C%0ATest serial.threaded related functionality.%0A%22%22%22%0A%0Aimport os%0Aimport unittest%0Aimport serial%0Aimport serial.threaded%0Aimport time%0A%0A%0A# on which port should the tests be performed:%0APORT = 'loop://'%0A%0Aclass Test_asyncio(unittest.TestCase):%0A %22%22%22Test asyncio related functionality%22%22%22%0A%0A def test_line_reader(self):%0A %22%22%22simple test of line reader class%22%22%22%0A%0A class TestLines(serial.threaded.LineReader):%0A def __init__(self):%0A super(TestLines, self).__init__()%0A self.received_lines = %5B%5D%0A%0A def handle_line(self, data):%0A self.received_lines.append(data)%0A%0A ser = serial.serial_for_url(PORT, baudrate=115200, timeout=1)%0A with serial.threaded.ReaderThread(ser, TestLines) as protocol:%0A protocol.write_line('hello')%0A time.sleep(1)%0A self.assertEqual(protocol.received_lines, %5B'hello'%5D)%0A%0A%0Aif __name__ == '__main__':%0A import sys%0A sys.stdout.write(__doc__)%0A if len(sys.argv) %3E 1:%0A PORT = sys.argv%5B1%5D%0A sys.stdout.write(%22Testing port: %7B!r%7D%5Cn%22.format(PORT))%0A sys.argv%5B1:%5D = %5B'-v'%5D%0A # When this module is executed from the command-line, it runs all its tests%0A unittest.main()%0A
|
|
70683aabe3cebda02db62fc254b7ec7532a50618
|
Add test_config.sample.py.
|
test_config.sample.py
|
test_config.sample.py
|
Python
| 0 |
@@ -0,0 +1,37 @@
+CREDENTIAL_FILE = ''%0ASHEET_NAME = ''%0A
|
|
73603c24ee3955c331bcfe0ff0dba7d0b04b51b4
|
Change Content-Encoding to gzip in memory_cache_http_server.
|
tools/telemetry/telemetry/core/memory_cache_http_server.py
|
tools/telemetry/telemetry/core/memory_cache_http_server.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import BaseHTTPServer
from collections import namedtuple
import mimetypes
import os
import SimpleHTTPServer
import SocketServer
import sys
import zlib
ByteRange = namedtuple('ByteRange', ['from_byte', 'to_byte'])
ResourceAndRange = namedtuple('ResourceAndRange', ['resource', 'byte_range'])
class MemoryCacheHTTPRequestHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
"""Serve a GET request."""
resource_range = self.SendHead()
if not resource_range or not resource_range.resource:
return
response = resource_range.resource['response']
if not resource_range.byte_range:
self.wfile.write(response)
return
start_index = resource_range.byte_range.from_byte
end_index = resource_range.byte_range.to_byte
self.wfile.write(response[start_index:end_index + 1])
def do_HEAD(self):
"""Serve a HEAD request."""
self.SendHead()
def SendHead(self):
path = self.translate_path(self.path)
if path not in self.server.resource_map:
self.send_error(404, 'File not found')
return None
resource = self.server.resource_map[path]
total_num_of_bytes = resource['content-length']
byte_range = self.GetByteRange(total_num_of_bytes)
if byte_range:
# request specified a range, so set response code to 206.
self.send_response(206)
self.send_header('Content-Range',
'bytes %d-%d/%d' % (byte_range.from_byte,
byte_range.to_byte,
total_num_of_bytes))
total_num_of_bytes = byte_range.to_byte - byte_range.from_byte + 1
else:
self.send_response(200)
self.send_header('Content-Length', str(total_num_of_bytes))
self.send_header('Content-Type', resource['content-type'])
self.send_header('Last-Modified',
self.date_time_string(resource['last-modified']))
if resource['zipped']:
self.send_header('Content-Encoding', 'deflate')
self.end_headers()
return ResourceAndRange(resource, byte_range)
def GetByteRange(self, total_num_of_bytes):
"""Parse the header and get the range values specified.
Args:
total_num_of_bytes: Total # of bytes in requested resource,
used to calculate upper range limit.
Returns:
A ByteRange namedtuple object with the requested byte-range values.
If no Range is explicitly requested or there is a failure parsing,
return None.
Special case: If range specified is in the format "N-", return N-N.
If upper range limit is greater than total # of bytes, return upper index.
"""
range_header = self.headers.getheader('Range')
if range_header is None:
return None
if not range_header.startswith('bytes='):
return None
# The range header is expected to be a string in this format:
# bytes=0-1
# Get the upper and lower limits of the specified byte-range.
# We've already confirmed that range_header starts with 'bytes='.
byte_range_values = range_header[len('bytes='):].split('-')
from_byte = 0
to_byte = 0
if len(byte_range_values) == 2:
from_byte = int(byte_range_values[0])
if byte_range_values[1]:
to_byte = int(byte_range_values[1])
else:
return None
# Do some validation.
if from_byte < 0:
return None
if to_byte < from_byte:
to_byte = from_byte
if to_byte >= total_num_of_bytes:
# End of range requested is greater than length of requested resource.
# Only return # of available bytes.
to_byte = total_num_of_bytes - 1
return ByteRange(from_byte, to_byte)
class MemoryCacheHTTPServer(SocketServer.ThreadingMixIn,
BaseHTTPServer.HTTPServer):
# Increase the request queue size. The default value, 5, is set in
# SocketServer.TCPServer (the parent of BaseHTTPServer.HTTPServer).
# Since we're intercepting many domains through this single server,
# it is quite possible to get more than 5 concurrent requests.
request_queue_size = 128
def __init__(self, host_port, handler, paths):
BaseHTTPServer.HTTPServer.__init__(self, host_port, handler)
self.resource_map = {}
for path in paths:
if os.path.isdir(path):
self.AddDirectoryToResourceMap(path)
else:
self.AddFileToResourceMap(path)
def AddDirectoryToResourceMap(self, directory_path):
"""Loads all files in directory_path into the in-memory resource map."""
for root, dirs, files in os.walk(directory_path):
# Skip hidden files and folders (like .svn and .git).
files = [f for f in files if f[0] != '.']
dirs[:] = [d for d in dirs if d[0] != '.']
for f in files:
file_path = os.path.join(root, f)
if not os.path.exists(file_path): # Allow for '.#' files
continue
self.AddFileToResourceMap(file_path)
def AddFileToResourceMap(self, file_path):
"""Loads file_path into the in-memory resource map."""
with open(file_path, 'rb') as fd:
response = fd.read()
fs = os.fstat(fd.fileno())
content_type = mimetypes.guess_type(file_path)[0]
zipped = False
if content_type in ['text/html', 'text/css', 'application/javascript']:
zipped = True
response = zlib.compress(response, 9)
self.resource_map[file_path] = {
'content-type': content_type,
'content-length': len(response),
'last-modified': fs.st_mtime,
'response': response,
'zipped': zipped
}
index = os.path.sep + 'index.html'
if file_path.endswith(index):
self.resource_map[
file_path[:-len(index)]] = self.resource_map[file_path]
def Main():
assert len(sys.argv) > 2, 'usage: %prog <port> [<path1>, <path2>, ...]'
port = int(sys.argv[1])
paths = sys.argv[2:]
server_address = ('127.0.0.1', port)
MemoryCacheHTTPRequestHandler.protocol_version = 'HTTP/1.1'
httpd = MemoryCacheHTTPServer(server_address, MemoryCacheHTTPRequestHandler,
paths)
httpd.serve_forever()
if __name__ == '__main__':
Main()
|
Python
| 0.000163 |
@@ -216,16 +216,28 @@
edtuple%0A
+import gzip%0A
import m
@@ -310,19 +310,24 @@
ort
-sys
+StringIO
%0Aimport
zlib
@@ -322,20 +322,19 @@
%0Aimport
-zlib
+sys
%0A%0A%0AByteR
@@ -2181,15 +2181,12 @@
', '
-deflate
+gzip
')%0A
@@ -5490,44 +5490,203 @@
-response = zlib.compress(response, 9
+sio = StringIO.StringIO()%0A gzf = gzip.GzipFile(fileobj=sio, compresslevel=9, mode='wb')%0A gzf.write(response)%0A gzf.close()%0A response = sio.getvalue()%0A sio.close(
)%0A
|
499adce8b5c23d60073d4c92259e611609ee0c61
|
Add initial draft script to analyse Maven deps
|
states/common/maven/artifacts/check_dependencies.py
|
states/common/maven/artifacts/check_dependencies.py
|
Python
| 0 |
@@ -0,0 +1,636 @@
+#!/usr/bin/env python%0A%0Aimport subprocess as sub%0Aimport yaml%0Aimport re%0A%0Adistrib_pom_path = '/home/uvsmtid/Works/maritime-singapore.git/clearsea-distribution/pom.xml'%0A%0A# Resolve (download) all dependencies locally so that next command%0A# can work offline.%0Asub.check_call(%0A %5B%0A 'mvn',%0A '-f',%0A distrib_pom_path,%0A 'dependency:resolve',%0A %5D,%0A)%0A%0A# Get list of all dependencies. %0Ap = sub.Popen(%0A %5B%0A 'mvn',%0A '-f',%0A distrib_pom_path,%0A 'dependency:list',%0A %5D,%0A stdout = sub.PIPE,%0A)%0A%0A# Select lines with dependency items.%0Aartifact_regex = re.compile(')%0Afor line in p.stdout:%0A %0A
|
|
1a065a251c3337ae7741af1916c51f2edcb9180f
|
add db.py
|
www/transwarp/db.py
|
www/transwarp/db.py
|
Python
| 0.000002 |
@@ -0,0 +1,1165 @@
+#/usr/bin/python%0A#_*_ coding:utf-8 _*_%0A%0Aimport threading%0A%0Aclass _Engine(object):%0A %22%22%22%0A %E6%95%B0%E6%8D%AE%E5%BA%93%E5%BC%95%E6%93%8E%E5%AF%B9%E8%B1%A1%0A %22%22%22%0A def __init__(self, connect):%0A self._connect = connect%0A def connect(self):%0A return self._connect()%0A%0Aengine = None%0A%0Aclass _DbCtx(threading.local):%0A %22%22%22%0A %E6%8C%81%E6%9C%89%E6%95%B0%E6%8D%AE%E5%BA%93%E8%BF%9E%E6%8E%A5%E7%9A%84%E4%B8%8A%E4%B8%8B%E6%96%87%E5%AF%B9%E8%B1%A1%0A %22%22%22%0A def __init__(self):%0A self.connection = None%0A self.transactions = 0%0A%0A def is_init(self):%0A return not self.connection is None%0A%0A def init(self):%0A self.connection = _LasyConnection()%0A self.transactions = 0%0A%0A def cleanup(self):%0A self.connection.cleanup()%0A self.connection = None%0A%0A def cursor(self):%0A return self.connection.cursor()%0A%0A_db_ctx = _DbCtx()%0A%0Aclass _ConnectionCtx(object):%0A def __enter__(self):%0A global _db_ctx%0A self.should_cleanup = False%0A if not _db_ctx.is_init():%0A _db_ctx.is_init()%0A self.should_cleanup = True%0A return self%0A%0A def __exit__(self, exc_type, exc_val, exc_tb):%0A global _db_ctx%0A if self.should_cleanup:%0A _db_ctx.cleanup()%0A%0A def connection():%0A return _ConnectionCtx()%0A%0A%0A%0A%0A%0A%0A%0A
|
|
6fd75772efac321517a1d8c01addfa5cbbf7caf0
|
Add test file for user functions.
|
tests/db/user_test.py
|
tests/db/user_test.py
|
Python
| 0 |
@@ -0,0 +1,694 @@
+from okcupyd.db import user%0A%0Adef test_have_messaged_before(T):%0A message_thread_model = T.factory.message_thread()%0A assert user.have_messaged_by_username(%0A message_thread_model.initiator.handle,%0A message_thread_model.respondent.handle%0A )%0A assert user.have_messaged_by_username(%0A message_thread_model.respondent.handle,%0A message_thread_model.initiator.handle%0A )%0A%0A assert not user.have_messaged_by_username('a', 'b')%0A assert not user.have_messaged_by_username(%0A message_thread_model.respondent.handle, 'a'%0A )%0A%0A T.factory.user('b')%0A assert not user.have_messaged_by_username(%0A 'b', message_thread_model.initiator.handle%0A )%0A
|
|
6d2480c5817a8ba7a4a810378ce8fabe0ede3cbf
|
check YAML examples
|
tests/testexamples.py
|
tests/testexamples.py
|
Python
| 0.000001 |
@@ -0,0 +1,210 @@
+#!/usr/bin/python%0A%0Aimport os%0Aimport yaml%0A%0Adef test_examples():%0A for filename in os.listdir(%22examples/%22):%0A with open(os.path.join(%22examples%22, filename)) as stream:%0A print(yaml.load(stream))%0A
|
|
a9bb7c7c929b0e182160a700e0a3f23dc3e81765
|
Update and rename exercises to exercises/12.py
|
exercises/12.py
|
exercises/12.py
|
Python
| 0.000001 |
@@ -0,0 +1,264 @@
+'''%0A%0ADefine a procedure histogram() that takes a list of%0Aintegers and prints a histogram to the screen.%0AFor example, histogram(%5B4, 9, 7%5D) should print the following:%0A%0A****%0A*********%0A*******%0A%0A'''%0A%0A%0Adef histogram(lst):%0A for item in lst:%0A print(item * '*')%0A
|
|
a0d2e58a6eecf3427646f311e638c359706e806a
|
Add Energenie example code
|
energenie.py
|
energenie.py
|
Python
| 0 |
@@ -0,0 +1,1154 @@
+import RPi.GPIO as GPIO%0Afrom time import sleep%0A%0Abit1 = 11%0Abit2 = 15%0Abit3 = 16%0Abit4 = 13%0A%0AGPIO.setmode(GPIO.BOARD)%0AGPIO.setwarnings(False)%0A%0AGPIO.setup(bit1, GPIO.OUT)%0AGPIO.setup(bit2, GPIO.OUT)%0AGPIO.setup(bit3, GPIO.OUT)%0AGPIO.setup(bit4, GPIO.OUT)%0A%0AGPIO.setup(18, GPIO.OUT)%0AGPIO.setup(22, GPIO.OUT)%0A%0AGPIO.output(22, False)%0AGPIO.output(18, False)%0A%0AGPIO.output(bit1, False)%0AGPIO.output(bit2, False)%0AGPIO.output(bit3, False)%0AGPIO.output(bit4, False)%0A%0Aon = %5B'1011', '0111', '0110', '0101', '0100'%5D%0Aoff = %5B'0011', '1111', '1110', '1101', '1100'%5D%0A%0Adef change_plug_state(socket, on_or_off):%0A state = on_or_off%5Bsocket%5D%5B-1%5D == '1'%0A GPIO.output(bit1, state)%0A state = on_or_off%5Bsocket%5D%5B-2%5D == '1'%0A GPIO.output(bit2, state)%0A state = on_or_off%5Bsocket%5D%5B-3%5D == '1'%0A GPIO.output(bit3, state)%0A state = on_or_off%5Bsocket%5D%5B-4%5D == '1'%0A GPIO.output(bit4, state)%0A sleep(0.1)%0A GPIO.output(22, True)%0A sleep(0.25)%0A GPIO.output(22, False)%0A%0Awhile True:%0A raw_input('Hit any key to turn on: ')%0A print('turning on')%0A change_plug_state(2, on)%0A raw_input('Hit any key to turn off: ')%0A print('turning off')%0A change_plug_state(0, off)%0A
|
|
c086b750c30c3a265821be02364f8670d71ebdc4
|
remove "free" from list of customer types
|
corehq/apps/domain/forms.py
|
corehq/apps/domain/forms.py
|
import re
from django import forms
from django.contrib.auth.models import User
import django_tables as tables
from django.core.validators import validate_email
from django.forms.fields import ChoiceField, CharField
from django.utils.encoding import smart_str
from corehq.apps.domain.middleware import _SESSION_KEY_SELECTED_DOMAIN
from corehq.apps.domain.models import Domain
########################################################################################################
#
# From http://www.peterbe.com/plog/automatically-strip-whitespace-in-django-forms
#
# I'll put this in each app, so they can be standalone, but it should really go in some centralized
# part of the distro.
#
# Need to remember to call:
#
# super(_BaseForm, self).clean() in any derived class that overrides clean()
from corehq.apps.domain.utils import new_domain_re
from dimagi.utils.timezones.fields import TimeZoneField
from dimagi.utils.timezones.forms import TimeZoneChoiceField
from corehq.apps.users.util import format_username
class _BaseForm(object):
def clean(self):
for field in self.cleaned_data:
if isinstance(self.cleaned_data[field], basestring):
self.cleaned_data[field] = self.cleaned_data[field].strip()
return self.cleaned_data
########################################################################################################
class DomainModelChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
return obj.name
########################################################################################################
class DomainBoundModelChoiceField(forms.ModelChoiceField):
"""A model choice field for an object that is restricted by domain"""
_domain = None
def _get_domain(self):
return self._domain
def _set_domain(self, domain):
_domain = domain
self.queryset = self.queryset.model.objects.filter(domain_membership__domain=domain)
domain = property(_get_domain, _set_domain)
########################################################################################################
class DomainSelectionForm(forms.Form):
domain_list = DomainModelChoiceField(queryset=[], empty_label=None, label="Project List")
def __init__(self, domain_list=None, *args, **kwargs):
super(DomainSelectionForm, self).__init__(*args, **kwargs)
# Here's how we set the runtime filtering of the domains to be displayed in the
# selector box
if domain_list is not None:
self.fields['domain_list'].queryset = domain_list
def save( self,
request,
selected_domain_key = _SESSION_KEY_SELECTED_DOMAIN ):
d = Domain(id = self.cleaned_data['domain_list'].id,
name = self.cleaned_data['domain_list'].name )
request.session[selected_domain_key] = d
request.user.selected_domain = d
return True
########################################################################################################
class DomainGlobalSettingsForm(forms.Form):
default_timezone = TimeZoneChoiceField(label="Default Timezone", initial="UTC")
case_sharing = ChoiceField(label='Case Sharing', choices=(('false', 'Off'), ('true', 'On')))
def clean_default_timezone(self):
data = self.cleaned_data['default_timezone']
timezone_field = TimeZoneField()
timezone_field.run_validators(data)
return smart_str(data)
def save(self, request, domain):
try:
domain.default_timezone = self.cleaned_data['default_timezone']
domain.case_sharing = self.cleaned_data['case_sharing'] == 'true'
domain.save()
return True
except Exception:
return False
class DomainMetadataForm(forms.Form):
city = CharField(label="City", required=False)
country = CharField(label="Country", required=False)
region = CharField(label="Region", required=False,
help_text="e.g. US, LAC, SA, Sub-Saharan Africa, Southeast Asia, etc.")
project_type = CharField(label="Project Category", required=False,
help_text="e.g. MCH, HIV, etc.")
customer_type = ChoiceField(label='Customer Type',
choices=(('free', 'Free'), ('basic', 'Basic'),
('plus', 'Plus'), ('full', 'Full')))
is_test = ChoiceField(label='Test Project', choices=(('false', 'Real'), ('true', 'Test')))
def save(self, request, domain):
try:
domain.city = self.cleaned_data['city']
domain.country = self.cleaned_data['country']
domain.region = self.cleaned_data['region']
domain.project_type = self.cleaned_data['project_type']
domain.customer_type = self.cleaned_data['customer_type']
domain.is_test = self.cleaned_data['is_test'] == 'true'
domain.save()
return True
except Exception:
return False
########################################################################################################
min_pwd = 4
max_pwd = 20
pwd_pattern = re.compile( r"([-\w]){" + str(min_pwd) + ',' + str(max_pwd) + '}' )
def clean_password(txt):
if len(txt) < min_pwd:
raise forms.ValidationError('Password is too short; must be at least %s characters' % min_pwd )
if len(txt) > max_pwd:
raise forms.ValidationError('Password is too long; must be less than %s characters' % max_pwd )
if not pwd_pattern.match(txt):
raise forms.ValidationError('Password may only contain letters, numbers, hyphens, and underscores')
return txt
########################################################################################################
class UpdateSelfForm(_BaseForm, forms.Form):
first_name = forms.CharField(label='First name', max_length=User._meta.get_field('first_name').max_length)
last_name = forms.CharField(label='Last (family) name', max_length=User._meta.get_field('last_name').max_length)
email = forms.EmailField(label ='Email address', max_length=User._meta.get_field('email').max_length)
########################################################################################################
class UpdateSelfTable(tables.Table):
property = tables.Column(verbose_name="Property")
old_val= tables.Column(verbose_name="Old value")
new_val= tables.Column(verbose_name="New value")
|
Python
| 0.000302 |
@@ -4422,84 +4422,25 @@
=(('
-free', 'Free'), ('basic', 'Basic'),%0A
+basic', 'Basic'),
('p
|
8646929a913b77438bf58e48e672ea68492d3ac1
|
Mark third_party/accessibility-developer-tools as a known license info issue.
|
android_webview/tools/known_issues.py
|
android_webview/tools/known_issues.py
|
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""List of known-incompatibly-licensed directories for Android WebView.
This is not used by the webview_licenses tool itself; it is effectively a
"cache" of the output of webview_licenses.GetIncompatibleDirectories() for the
subset of repositories that WebView needs.
We store a copy here because GetIncompatibleDirectories() doesn't work properly
after things have been removed from the tree - it can no longer see the
README.chromium files for previously-removed directories, but they may have
newly added files in them. As long as this list is up to date, we can remove the
things listed first, and then just run the tool afterwards to validate that it
was sufficient. If the tool returns any extra directories then the snapshotting
process will stop and this list must be updated.
"""
# If there is a temporary license-related issue with a particular third_party
# directory, please put it here, with a comment linking to the bug entry.
KNOWN_ISSUES = []
KNOWN_INCOMPATIBLE = {
# Incompatible code in the main chromium repository.
'.': [
'base/third_party/xdg_mime',
'breakpad',
'chrome/installer/mac/third_party/xz',
'chrome/test/data',
'third_party/active_doc',
'third_party/apple_apsl',
'third_party/apple_sample_code',
'third_party/bsdiff',
'third_party/bspatch',
'third_party/sudden_motion_sensor',
'third_party/swiftshader',
'third_party/talloc',
'third_party/webdriver',
'third_party/wtl',
'tools/telemetry/third_party/websocket-client',
],
# Incompatible code in ICU.
'third_party/icu': [
'source/data/brkitr',
],
}
KNOWN_INCOMPATIBLE['.'].extend(KNOWN_ISSUES)
|
Python
| 0.000178 |
@@ -1121,16 +1121,83 @@
SUES = %5B
+%0A 'third_party/accessibility-developer-tools', # crbug.com/165901%0A
%5D%0A%0AKNOWN
|
a5440305173c218ec785b0d5a2dfa8b02bb0b731
|
Add package: py-fava (#21275)
|
var/spack/repos/builtin/packages/py-fava/package.py
|
var/spack/repos/builtin/packages/py-fava/package.py
|
Python
| 0 |
@@ -0,0 +1,1932 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass PyFava(PythonPackage):%0A %22%22%22Fava is a web interface for the double-entry bookkeeping software%0A Beancount with a focus on features and usability.%22%22%22%0A%0A homepage = %22https://beancount.github.io/fava/%22%0A pypi = %22fava/fava-1.18.tar.gz%22%0A%0A version('1.18', sha256='21336b695708497e6f00cab77135b174c51feb2713b657e0e208282960885bf5')%0A%0A # For some reason Fava adds a whole bunch of executables to%0A # its bin directory, and this causes clashes when loading%0A # the module.%0A extends('python', ignore='bin/%5E(?!fava).*')%0A%0A # Some of the dependencies are not listed as required at%0A # build or run time, but actually are.%0A # - py-setuptools%0A # - py-importlib%0A # - py-pytest%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('py-setuptools', type=('build', 'run'))%0A depends_on('py-setuptools-scm', type=('build'))%0A%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('py-cheroot', type=('build', 'run'))%0A depends_on('py-click', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('py-importlib', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('py-ply', type=('build', 'run'))%0A depends_on('py-pytest', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A
|
|
c404981d3a1d18248d6fa2fa18cc16037353d377
|
Update main.py
|
lib/main.py
|
lib/main.py
|
try:
import sys
import re
import subprocess
import os
from core.configparser import ConfigParser
from nmap import Nmap
from mount_detect import MountDetect
from core.threadpool import Worker,ThreadPool
from core.common import *
except ImportError,e:
import sys
sys.stdout.write("%s\n" %e)
sys.exit(1)
class Main:
"""
Main Class and Functions ...
"""
def __init__(self, config_file, wipe = None):
"""
Nmap init functions set variables
"""
self.config_file = config_file
self.wipe = wipe
self.session_id = 0
self.share_reg = re.compile("^Disk\|[^$]+\|")
self.status_reg = re.compile("[0-9]+")
self.share_file_reg = re.compile("[0-9]+,")
current_dir = os.getcwd()
self.share_session = current_dir + "/" + "sessions/share.session"
self.sharestatus_session = current_dir + "/" + "sessions/sharestatus.session"
self.nmap_path = "/usr/bin/nmap"
self.mount_cifs_path = "/sbin/mount.cifs"
self.mount_path = "/bin/mount"
self.umount_path = "/bin/umount"
self.smbclient_path = "/usr/bin/smbclient"
self.find_path = "/usr/bin/find"
self.curl_path = "/usr/bin/curl"
packages = [ self.nmap_path, self.mount_cifs_path, self.umount_path, self.mount_path, self.smbclient_path, self.find_path, self.curl_path ]
for pkg in packages:
if not os.path.isfilepkg):
print >> sys.stderr, bcolors.OKBLUE + "Error : " + bcolors.ENDC + bcolors.FAIL + "Package %s exists but file %s doesn't exists"% (pkg,packages[pkg])
sys.exit(1)
self.config_result = ConfigParser.parse(self.config_file)
def is_sharestatus_file(self):
"""
Check sharestatus.session file and whether there are some records in it or not ...
"""
if not os.path.isfile(self.sharestatus_session):
return None
try:
read_file = open(self.sharestatus_session, "r").read().splitlines()
except:
return None
for line in read_file:
if re.match(self.status_reg, line):
return line
return None
def is_share_file(self):
"""
Check sharestatus.session file and whether there are some records in it or not ...
"""
if not os.path.isfile(self.share_session):
return None
try:
read_file = open(self.share_session, "r").read().splitlines()
except:
return None
for line in read_file:
if re.match(self.share_file_reg, line):
return True
return None
def feed_sessions(self, ip, path):
"""
Feed Sessions ...
"""
try:
session_file = open(self.share_session,"a")
except Exception, err_mess:
print err_mess
sys.exit(1)
sess_id = str(self.session_id) + "," + ip + "," + path + "\n"
session_file.write(sess_id)
session_file.close()
self.session_id = self.session_id + 1
def list_sharing(self, ip, output_file):
"""
Listing sharing ...
"""
if self.config_result["username"] and self.config_result["password"] and self.config_result["domain"]:
creds = "'" + self.config_result["domain"] + "\\" + self.config_result["username"] + "%" + self.config_result["password"] + "'"
run_smbclient = "%s -L %s -U %s -g 2>/dev/null"% (self.smbclient_path, ip, creds)
else:
run_smbclient = "%s -L %s -N -g 2>/dev/null"% (self.smbclient_path, ip)
# debug
#print "Command to run: " + run_smbclient
proc = subprocess.Popen([run_smbclient], shell = True, stdout = subprocess.PIPE,)
share_name = None
for line in iter(proc.stdout.readline,''):
share_result = line.rstrip()
if re.match(self.share_reg, share_result):
if not share_name:
share_name = str(share_result.split("|")[1])
else:
share_name = share_name + ":" + str(share_result.split("|")[1])
if share_name:
self.feed_sessions(ip, share_name)
def run(self):
"""
Run Nmap Operations ..
"""
# if wipe is 1, remove all session file
if self.wipe == 1:
try:
os.remove(self.share_session)
os.remove(self.sharestatus_session)
except:
pass
include_ip = self.config_result["include_ip"]
exclude_ip = self.config_result["exclude_ip"]
self.nmap = Nmap(self.nmap_path)
nmap_result = self.nmap.port_scan(include_ip, exclude_ip)
if nmap_result:
thread_count = int(self.config_result["scanning_thread"])
output_file = self.config_result["output_file"]
# debug
#print "Thread count to run nmap %s"% thread_count
pool = ThreadPool(thread_count)
for ip in nmap_result:
pool.add_task(self.list_sharing, ip, output_file)
pool.wait_completion()
#try:
mount_detect = MountDetect(self.config_file, self.share_session, self.sharestatus_session, self.mount_path, self.umount_path, self.find_path, self.curl_path)
#except:
#print "Error when initializing mountdetect class ..."
#sys.exit(1)
share_status = self.is_sharestatus_file()
# if share status file exists
if share_status :
#debug
#print "SessionStatus exists , go go go ..."
rest_line = self.is_sharestatus_file()
if rest_line:
mount_detect.run(int(rest_line))
else:
print "Error getting data from SessionStatus file"
sys.exit(1)
# if share status file doesn't exists
else:
share_file = self.is_share_file()
# if share.session file exists
if share_file:
#debug
#print "There is no SessionStatus file but Share file exists , go go go ..."
mount_detect.run(0)
# if share.session file doesn't exists
else:
#debug
#print "There is no session file. Bye ..."
sys.exit(1)
|
Python
| 0.000001 |
@@ -1412,16 +1412,17 @@
h.isfile
+(
pkg):%0A
|
31342e58f914c057404fd35edfff42b95e5fb051
|
Test #2 (with the current GitLab API syntax)
|
gitlabform/gitlabform/test/test_project_settings.py
|
gitlabform/gitlabform/test/test_project_settings.py
|
Python
| 0 |
@@ -0,0 +1,1153 @@
+import pytest%0A%0Afrom gitlabform.gitlabform import GitLabForm%0Afrom gitlabform.gitlabform.test import create_group, create_project_in_group, get_gitlab, GROUP_NAME%0A%0APROJECT_NAME = 'project_settings_project'%0AGROUP_AND_PROJECT_NAME = GROUP_NAME + '/' + PROJECT_NAME%0A%0A%[email protected](scope=%22module%22)%0Adef gitlab(request):%0A create_group(GROUP_NAME)%0A create_project_in_group(GROUP_NAME, PROJECT_NAME)%0A%0A gl = get_gitlab()%0A%0A def fin():%0A gl.delete_project(GROUP_AND_PROJECT_NAME)%0A%0A request.addfinalizer(fin)%0A return gl # provide fixture value%0A%0A%0Aconfig_builds_for_private_projects = %22%22%22%0Agitlab:%0A api_version: 4%0A%0Aproject_settings:%0A project_settings:%0A builds_access_level: private%0A visibility: private%0A%22%22%22%0A%0A%0Aclass TestProjectSettings:%0A%0A def test__builds_for_private_projects(self, gitlab):%0A gf = GitLabForm(config_string=config_builds_for_private_projects,%0A project_or_group=GROUP_AND_PROJECT_NAME)%0A gf.main()%0A%0A settings = gitlab.get_project_settings(GROUP_AND_PROJECT_NAME)%0A assert settings%5B'builds_access_level'%5D is 'private'%0A assert settings%5B'visibility'%5D is 'private'%0A
|
|
caf135f6c94146038ac9d9e77a808e30ea52f900
|
make pyroma a runnable module (#62)
|
pyroma/__main__.py
|
pyroma/__main__.py
|
Python
| 0 |
@@ -0,0 +1,59 @@
+from . import main%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
25a1d94b45980fbc78c162af2c81ad807ee954c9
|
add test_vpr.py, add test functions and stubs
|
wradlib/tests/test_vpr.py
|
wradlib/tests/test_vpr.py
|
Python
| 0 |
@@ -0,0 +1,2037 @@
+#!/usr/bin/env python%0A# -------------------------------------------------------------------------------%0A# Name: test_vpr.py%0A# Purpose: testing file for the wradlib.vpr module%0A#%0A# Authors: wradlib developers%0A#%0A# Created: 26.02.2016%0A# Copyright: (c) wradlib developers%0A# Licence: The MIT License%0A# -------------------------------------------------------------------------------%0A%0Aimport unittest%0A%0Aimport wradlib.vpr as vpr%0Aimport wradlib.georef as georef%0Aimport numpy as np%0A%0Aclass VPRHelperFunctionsTest(unittest.TestCase):%0A def setUp(self):%0A self.site = (7.0, 53.0, 100.)%0A self.proj = georef.epsg_to_osr(31467)%0A self.az = np.arange(0., 360., 1.)%0A self.r = np.arange(0, 100000, 1000)%0A self.el = 2.5%0A self.coords = vpr.volcoords_from_polar(self.site, self.el, self.az, self.r, self.proj)%0A%0A def test_out_of_range(self):%0A pass%0A%0A def test_blindspots(self):%0A pass%0A%0A def test_volcoords_from_polar(self):%0A coords = vpr.volcoords_from_polar(self.site, self.el, self.az, self.r, self.proj)%0A pass%0A%0A def test_volcoords_from_polar_irregular(self):%0A coords = vpr.volcoords_from_polar_irregular(self.site, %5Bself.el, 5.0%5D, self.az, self.r, self.proj)%0A pass%0A%0A def test_synthetic_polar_volume(self):%0A vol = vpr.synthetic_polar_volume(self.coords)%0A pass%0A%0A def test_vpr_interpolator(self):%0A pass%0A%0A def test_correct_vpr(self):%0A pass%0A%0A def test_mean_norm_from_vpr(self):%0A pass%0A%0A def test_norm_vpr_stats(self):%0A pass%0A%0A def test_make_3D_grid(self):%0A maxrange = 200000.%0A maxalt = 5000.%0A horiz_res = 2000.%0A vert_res = 250.%0A vpr.make_3D_grid(self.site, self.proj, maxrange, maxalt, horiz_res, vert_res)%0A pass%0A%0Aclass CartesianVolumeTest(unittest.TestCase):%0A def test_CartesianVolume(self):%0A pass%0A%0A def test_CAPPI(self):%0A pass%0A%0A def test_PseudoCAPPI(self):%0A pass%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
9ad3b4e6ff5ec500fe1feeb841c4fe00e9267d19
|
add sh_quote.py
|
python/sh_quote.py
|
python/sh_quote.py
|
Python
| 0.000004 |
@@ -0,0 +1,957 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0A# Copyright (c) 2014 Tristan Cavelier %[email protected]%3E%0A# This program is free software. It comes without any warranty, to%0A# the extent permitted by applicable law. You can redistribute it%0A# and/or modify it under the terms of the Do What The Fuck You Want%0A# To Public License, Version 2, as published by Sam Hocevar. See%0A# http://www.wtfpl.net/ for more details.%0A%0Adef sh_quote(*params):%0A return %22 %22.join((%22'%22 + p.replace(%22'%22, %22'%5C%5C''%22) + %22'%22 for p in params))%0A%0A%0A### in bash you can do :%0A# eval -- %22$(python sh_quote.py)%22%0A%0A### in python3 you can do :%0A# import os, sys%0A# out = os.popen(sh_quote(*%5B'ls', '-1', %22my'file;%22%5D))%0A# out._proc.wait()%0A# sys.stdout.write(out.read())%0A%0A######################################################################%0A# Tests%0A%0A# prints if failure%0Adef test(a, b):%0A if a != b:%0A print(a + %22 != %22 + b)%0A%0Atest(sh_quote(*%5B'ls', '-1', %22my'file;%22%5D), %22'ls' '-1' 'my'%5C%5C''file;'%22)%0A
|
|
bfd786dad5b19d46c8ce956b904fe23c378c9202
|
Patch usage of period in key. (#4020)
|
src/sentry/tasks/merge.py
|
src/sentry/tasks/merge.py
|
"""
sentry.tasks.merge
~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
import logging
from django.db import DataError, IntegrityError, router, transaction
from django.db.models import F
from sentry.tasks.base import instrumented_task, retry
from sentry.tasks.deletion import delete_group
# TODO(dcramer): probably should have a new logger for this, but it removes data
# so lets bundle under deletions
logger = logging.getLogger('sentry.deletions.merge')
@instrumented_task(name='sentry.tasks.merge.merge_group', queue='merge',
default_retry_delay=60 * 5, max_retries=None)
@retry
def merge_group(from_object_id=None, to_object_id=None, **kwargs):
# TODO(mattrobenolt): Write tests for all of this
from sentry.models import (
Activity, Group, GroupAssignee, GroupHash, GroupRuleStatus,
GroupSubscription, GroupTagKey, GroupTagValue, EventMapping, Event,
UserReport, GroupRedirect, GroupMeta,
)
if not (from_object_id and to_object_id):
logger.error('merge_group.malformed.missing_params')
return
try:
group = Group.objects.get(id=from_object_id)
except Group.DoesNotExist:
logger.warn('merge_group.malformed.invalid_id', extra={'object_id': from_object_id})
return
try:
new_group = Group.objects.get(id=to_object_id)
except Group.DoesNotExist:
logger.warn('merge_group.malformed.invalid_id', extra={'object_id': from_object_id})
return
model_list = (
Activity, GroupAssignee, GroupHash, GroupRuleStatus, GroupSubscription,
GroupTagValue, GroupTagKey, EventMapping, Event, UserReport,
GroupRedirect, GroupMeta,
)
has_more = merge_objects(model_list, group, new_group, logger=logger)
if has_more:
merge_group.delay(
from_object_id=from_object_id,
to_object_id=to_object_id,
)
return
previous_group_id = group.id
group.delete()
try:
with transaction.atomic():
GroupRedirect.objects.create(
group_id=new_group.id,
previous_group_id=previous_group_id,
)
except IntegrityError:
pass
new_group.update(
# TODO(dcramer): ideally these would be SQL clauses
first_seen=min(group.first_seen, new_group.first_seen),
last_seen=max(group.last_seen, new_group.last_seen),
)
try:
# it's possible to hit an out of range value for counters
new_group.update(
times_seen=F('times_seen') + group.times_seen,
num_comments=F('num_comments') + group.num_comments,
)
except DataError:
pass
@instrumented_task(name='sentry.tasks.merge.rehash_group_events', queue='merge',
default_retry_delay=60 * 5, max_retries=None)
@retry
def rehash_group_events(group_id, **kwargs):
from sentry.models import Group, GroupHash
group = Group.objects.get(id=group_id)
# Clear out existing hashes to preempt new events being added
# This can cause the new groups to be created before we get to them, but
# its a tradeoff we're willing to take
GroupHash.objects.filter(group=group).delete()
has_more = _rehash_group_events(group)
if has_more:
rehash_group_events.delay(
group_id=group.id
)
return
delete_group.delay(group.id)
def _rehash_group_events(group, limit=100):
from sentry.event_manager import (
EventManager, get_hashes_from_fingerprint, generate_culprit,
md5_from_hash
)
from sentry.models import Event, Group
event_list = list(Event.objects.filter(group_id=group.id)[:limit])
Event.objects.bind_nodes(event_list, 'data')
for event in event_list:
fingerprint = event.data.get('fingerprint', ['{{ default }}'])
if fingerprint and not isinstance(fingerprint, (list, tuple)):
fingerprint = [fingerprint]
elif not fingerprint:
fingerprint = ['{{ default }}']
manager = EventManager({})
group_kwargs = {
'message': event.message,
'platform': event.platform,
'culprit': generate_culprit(event.data),
'logger': event.get_tag('logger') or group.logger,
'level': group.level,
'last_seen': event.datetime,
'first_seen': event.datetime,
'data': group.data,
}
# XXX(dcramer): doesnt support checksums as they're not stored
hashes = map(md5_from_hash, get_hashes_from_fingerprint(event, fingerprint))
for hash in hashes:
new_group, _, _, _ = manager._save_aggregate(
event=event,
hashes=hashes,
release=None,
**group_kwargs
)
event.update(group_id=new_group.id)
if event.data.get('tags'):
Group.objects.add_tags(new_group, event.data['tags'])
return bool(event_list)
def merge_objects(models, group, new_group, limit=1000,
logger=None):
from sentry.models import GroupTagKey, GroupTagValue
has_more = False
for model in models:
if logger is not None:
logger.info('%s.merge' % model.__name__.lower(), extra={
'group_id.old': group.id,
'group_id.new': new_group.id
})
all_fields = model._meta.get_all_field_names()
has_group = 'group' in all_fields
if has_group:
queryset = model.objects.filter(group=group)
else:
queryset = model.objects.filter(group_id=group.id)
for obj in queryset[:limit]:
try:
with transaction.atomic(using=router.db_for_write(model)):
if has_group:
model.objects.filter(
id=obj.id
).update(group=new_group)
else:
model.objects.filter(
id=obj.id
).update(group_id=new_group.id)
except IntegrityError:
delete = True
else:
delete = False
if delete:
# Before deleting, we want to merge in counts
try:
if model == GroupTagKey:
with transaction.atomic(using=router.db_for_write(model)):
model.objects.filter(
group=new_group,
key=obj.key,
).update(values_seen=F('values_seen') + obj.values_seen)
elif model == GroupTagValue:
with transaction.atomic(using=router.db_for_write(model)):
model.objects.filter(
group=new_group,
key=obj.key,
value=obj.value,
).update(times_seen=F('times_seen') + obj.times_seen)
except DataError:
# it's possible to hit an out of range value for counters
pass
obj.delete()
has_more = True
if has_more:
return True
return has_more
|
Python
| 0 |
@@ -5457,24 +5457,28 @@
'
+new_
group_id
.old': g
@@ -5473,15 +5473,15 @@
p_id
-.old':
+': new_
grou
@@ -5503,24 +5503,28 @@
'
+old_
group_id
.new': n
@@ -5519,27 +5519,19 @@
p_id
-.new': new_
+':
group.id
%0A
@@ -5518,32 +5518,33 @@
up_id': group.id
+,
%0A %7D)%0A
|
2273dfcdb2f311f39e1bffe4f40cdc6e3b753155
|
add buildOffsetMap.py
|
buildOffsetMap.py
|
buildOffsetMap.py
|
Python
| 0.000001 |
@@ -0,0 +1,724 @@
+import sys, os, json%0Aimport frm%0A%0ADATA_PATH = %22data%22%0A%0Adef main():%0A%09if len(sys.argv) != 2:%0A%09%09print %22USAGE: %25s IMAGES_LIST%22 %25 sys.argv%5B0%5D%0A%09%09sys.exit(1)%0A%0A%09images = list(open(sys.argv%5B1%5D))%0A%09imageInfo = %7B%7D%0A%0A%09for image in images:%0A%09%09image = image.rstrip()%0A%09%09frmPath = os.path.join(DATA_PATH, image + %22.FRM%22)%0A%09%09frmInfo = frm.readFRMInfo(open(frmPath, %22rb%22))%0A%0A%09%09sx = 0 # running total width offset%0A%09%09for direction in frmInfo%5B'frameOffsets'%5D:%0A%09%09%09ox = 0 # running total offsets%0A%09%09%09oy = 0%0A%09%09%09for frame in direction:%0A%09%09%09%09ox += frame%5B'x'%5D%0A%09%09%09%09oy += frame%5B'y'%5D%0A%09%09%09%09frame%5B'sx'%5D = sx%0A%09%09%09%09frame%5B'ox'%5D = ox%0A%09%09%09%09frame%5B'oy'%5D = oy%0A%09%09%09%09sx += frame%5B'w'%5D%0A%0A%09%09imageInfo%5Bimage%5D = frmInfo%0A%0A%09print json.dumps(imageInfo)%0A%0Aif __name__ == '__main__':%0A%09main()
|
|
a62b4f70816b831a16973e861449b0c76761cf52
|
Create Odd_Even_Linked_List.py
|
data_structures/Linked_list/Python/Odd_Even_Linked_List.py
|
data_structures/Linked_list/Python/Odd_Even_Linked_List.py
|
Python
| 0 |
@@ -0,0 +1,709 @@
+'''%0AGiven the head of a singly linked list, group all the nodes with odd indices together followed by the nodes with even indices, and return the reordered list.%0A'''%0Aclass Solution(object):%0A def oddEvenList(self, head):%0A if head is None: return None%0A if head.next is None: return head%0A o = head%0A p = o.next%0A ehead = p%0A while p.next is not None:%0A o.next = p.next%0A p.next = p.next.next%0A %0A o = o.next%0A p = p.next%0A if p is None: break%0A o.next = ehead%0A return head%0A %0A'''%0AInput: head = %5B1,2,3,4,5%5D%0AOutput: %5B1,3,5,2,4%5D%0A----------------------%0AInput: head = %5B1,2,3,4,5%5D%0AOutput: %5B1,3,5,2,4%5D%0A'''%0A
|
|
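The Solution above assumes LeetCode's singly linked ListNode type, which the snippet never defines. A minimal local harness, with the Solution class from the diff decoded into the same module, might look like:

class ListNode(object):
    def __init__(self, val=0, next=None):
        self.val = val
        self.next = next

def build(values):
    # build a linked list from a Python list
    head = None
    for v in reversed(values):
        head = ListNode(v, head)
    return head

node = Solution().oddEvenList(build([1, 2, 3, 4, 5]))
out = []
while node:
    out.append(node.val)
    node = node.next
print(out)  # [1, 3, 5, 2, 4]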
dd0e335574afd936b5849186202aedc8500f2c5b
|
add build-front
|
organization/core/management/commands/build-front.py
|
organization/core/management/commands/build-front.py
|
Python
| 0.000017 |
@@ -0,0 +1,1660 @@
+# -*- coding: utf-8 -*-%0A#%0A# Copyright (c) 2016-2017 Ircam%0A# Copyright (c) 2016-2017 Guillaume Pellerin%0A# Copyright (c) 2016-2017 Emilie Zawadzki%0A%0A# This file is part of mezzanine-organization.%0A%0A# This program is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU Affero General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A%0A# This program is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU Affero General Public License for more details.%0A%0A# You should have received a copy of the GNU Affero General Public License%0A# along with this program. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0Aimport os, time%0Aimport subprocess%0Afrom django.apps import apps%0Afrom optparse import make_option%0Afrom django.conf import settings%0Afrom django.core.management.base import BaseCommand, CommandError%0Afrom django.db import connections%0A%0Aclass Command(BaseCommand):%0A help = %22Build the front with bower and gulp%22%0A%0A def handle(self, *args, **options):%0A theme = %22%22%0A theme_path = %22%22%0A for ht in settings.HOST_THEMES:%0A # search for theme name in INSTALLED_APPS%0A # to get the one is used%0A if ht%5B1%5D in settings.INSTALLED_APPS:%0A theme = ht%5B1%5D%0A%0A if theme :%0A theme_path = apps.get_app_config(theme.split('.')%5B1%5D).path%0A os.chdir(theme_path)%0A subprocess.run(%5B%22bower%22, %22--allow-root%22, %22install%22, %22&&%22, %22gulp%22, %22build%22%5D)%0A
|
|
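One caveat about the command in the diff above: subprocess.run with an argument list does not go through a shell, so the "&&" is handed to bower as a literal argument and gulp never runs. Two sequential calls express the intended chaining without invoking a shell; a sketch:

import subprocess

# run the two build steps in order; check=True aborts if the first fails
subprocess.run(["bower", "--allow-root", "install"], check=True)
subprocess.run(["gulp", "build"], check=True)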
9b5db6d8edd9c65d5811f6bd64147e9d1b17b8c9
|
Remove an unneeded platform check in lint_test_expectations.py
|
Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py
|
Tools/Scripts/webkitpy/layout_tests/lint_test_expectations.py
|
# Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import optparse
import signal
import traceback
from webkitpy.common.host import Host
from webkitpy.layout_tests.models import test_expectations
from webkitpy.layout_tests.port import platform_options
# This mirrors what the shell normally does.
INTERRUPTED_EXIT_STATUS = signal.SIGINT + 128
# This is a randomly chosen exit code that can be tested against to
# indicate that an unexpected exception occurred.
EXCEPTIONAL_EXIT_STATUS = 254
_log = logging.getLogger(__name__)
def lint(host, options):
# FIXME: Remove this when we remove the --chromium flag (crbug.com/245504).
if options.platform == 'chromium':
options.platform = None
ports_to_lint = [host.port_factory.get(name) for name in host.port_factory.all_port_names(options.platform)]
files_linted = set()
lint_failed = False
for port_to_lint in ports_to_lint:
expectations_dict = port_to_lint.expectations_dict()
for expectations_file in expectations_dict.keys():
if expectations_file in files_linted:
continue
try:
test_expectations.TestExpectations(port_to_lint,
expectations_dict={expectations_file: expectations_dict[expectations_file]},
is_lint_mode=True)
except test_expectations.ParseError as e:
lint_failed = True
_log.error('')
for warning in e.warnings:
_log.error(warning)
_log.error('')
files_linted.add(expectations_file)
return lint_failed
def check_virtual_test_suites(host, options):
port = host.port_factory.get(options=options)
fs = host.filesystem
layout_tests_dir = port.layout_tests_dir()
virtual_suites = port.virtual_test_suites()
check_failed = False
for suite in virtual_suites:
comps = [layout_tests_dir] + suite.name.split('/') + ['README.txt']
path_to_readme = fs.join(*comps)
if not fs.exists(path_to_readme):
_log.error('LayoutTests/%s/README.txt is missing (each virtual suite must have one).' % suite.name)
check_failed = True
if check_failed:
_log.error('')
return check_failed
def set_up_logging(logging_stream):
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(logging_stream)
logger.addHandler(handler)
return (logger, handler)
def tear_down_logging(logger, handler):
logger.removeHandler(handler)
def run_checks(host, options, logging_stream):
logger, handler = set_up_logging(logging_stream)
try:
lint_failed = lint(host, options)
check_failed = check_virtual_test_suites(host, options)
if lint_failed or check_failed:
_log.error('Lint failed.')
return 1
else:
_log.info('Lint succeeded.')
return 0
finally:
logger.removeHandler(handler)
def main(argv, _, stderr):
parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
options, _ = parser.parse_args(argv)
if options.platform and 'test' in options.platform:
# It's a bit lame to import mocks into real code, but this allows the user
# to run tests against the test platform interactively, which is useful for
# debugging test failures.
from webkitpy.common.host_mock import MockHost
host = MockHost()
else:
host = Host()
try:
exit_status = run_checks(host, options, stderr)
except KeyboardInterrupt:
exit_status = INTERRUPTED_EXIT_STATUS
except Exception as e:
print >> stderr, '\n%s raised: %s' % (e.__class__.__name__, str(e))
traceback.print_exc(file=stderr)
exit_status = EXCEPTIONAL_EXIT_STATUS
return exit_status
|
Python
| 0.000016 |
@@ -2048,160 +2048,8 @@
s):%0A
- # FIXME: Remove this when we remove the --chromium flag (crbug.com/245504).%0A if options.platform == 'chromium':%0A options.platform = None%0A%0A
|
cc2343a564572e6c0bd94279bf3907e9e85da79b
|
Create version.py
|
plotly-1.2.9/plotly/version.py
|
plotly-1.2.9/plotly/version.py
|
Python
| 0.000001 |
@@ -0,0 +1,22 @@
+__version__ = '1.2.9'%0A
|
|
65239ce01df89ceaaed989b28f4623ac521ce2c3
|
Add download_stats script
|
download_stats.py
|
download_stats.py
|
Python
| 0 |
@@ -0,0 +1,990 @@
+import argparse%0Afrom xmlrpclib import ServerProxy%0Aimport pickle%0A%0Aparser = argparse.ArgumentParser()%0Aparser.add_argument(%22-n%22, type=int)%0Aparser.add_argument(%22--package-list%22,%0A action=%22store%22)%0Aargs = parser.parse_args()%0A%0Aurl = 'https://pypi.python.org/pypi'%0Aclient = ServerProxy(url)%0A%0Aif not args.package_list:%0A args.package_list = client.list_packages()%0Aelse:%0A args.package_list = %5Bpackage.strip() for package in%0A open(args.package_list, 'r').readlines()%5D%0A%0Aif args.n:%0A args.package_list = args.package_list%5B:args.n%5D%0A%0Adownloads_dict = dict()%0Afor package in args.package_list:%0A versions = client.package_releases(package)%0A try:%0A latest_version = versions%5B0%5D%0A downloads = max(client.release_data(package,%0A latest_version)%5B'downloads'%5D.values())%0A downloads_dict%5Bpackage%5D = downloads%0A except:%0A downloads_dict%5Bpackage%5D = 0%0A%0Apickle.dump(downloads_dict, open('downloads_dict.pkl', 'w'))%0A
|
|
95a26454173b59c8609ddb81027ed71005e9e86c
|
add module to handle exceptions
|
cno/misc/tools.py
|
cno/misc/tools.py
|
Python
| 0.000001 |
@@ -0,0 +1,170 @@
+__all__ = %5B%22CNOError%22%5D%0A %0A%0Aclass CNOError(Exception):%0A def __init__(self, value):%0A self.value = value%0A def __str__(self):%0A return repr(self.value)%0A%0A
|
|
e1fc330ae40c5ca87958538982849db051f2947c
|
fix issue #55 - erase_data fails on Python 3.x
|
colorama/win32.py
|
colorama/win32.py
|
# Copyright Jonathan Hartley 2013. BSD 3-Clause license, see LICENSE file.
# from winbase.h
STDOUT = -11
STDERR = -12
try:
import ctypes
from ctypes import LibraryLoader
windll = LibraryLoader(ctypes.WinDLL)
from ctypes import wintypes
except (AttributeError, ImportError):
windll = None
SetConsoleTextAttribute = lambda *_: None
else:
from ctypes import (
byref, Structure, c_char, c_short, c_uint32, c_ushort, POINTER
)
COORD = wintypes._COORD
class CONSOLE_SCREEN_BUFFER_INFO(Structure):
"""struct in wincon.h."""
_fields_ = [
("dwSize", COORD),
("dwCursorPosition", COORD),
("wAttributes", wintypes.WORD),
("srWindow", wintypes.SMALL_RECT),
("dwMaximumWindowSize", COORD),
]
def __str__(self):
return '(%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d)' % (
self.dwSize.Y, self.dwSize.X
, self.dwCursorPosition.Y, self.dwCursorPosition.X
, self.wAttributes
, self.srWindow.Top, self.srWindow.Left, self.srWindow.Bottom, self.srWindow.Right
, self.dwMaximumWindowSize.Y, self.dwMaximumWindowSize.X
)
_GetStdHandle = windll.kernel32.GetStdHandle
_GetStdHandle.argtypes = [
wintypes.DWORD,
]
_GetStdHandle.restype = wintypes.HANDLE
_GetConsoleScreenBufferInfo = windll.kernel32.GetConsoleScreenBufferInfo
_GetConsoleScreenBufferInfo.argtypes = [
wintypes.HANDLE,
POINTER(CONSOLE_SCREEN_BUFFER_INFO),
]
_GetConsoleScreenBufferInfo.restype = wintypes.BOOL
_SetConsoleTextAttribute = windll.kernel32.SetConsoleTextAttribute
_SetConsoleTextAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
]
_SetConsoleTextAttribute.restype = wintypes.BOOL
_SetConsoleCursorPosition = windll.kernel32.SetConsoleCursorPosition
_SetConsoleCursorPosition.argtypes = [
wintypes.HANDLE,
COORD,
]
_SetConsoleCursorPosition.restype = wintypes.BOOL
_FillConsoleOutputCharacterA = windll.kernel32.FillConsoleOutputCharacterA
_FillConsoleOutputCharacterA.argtypes = [
wintypes.HANDLE,
c_char,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputCharacterA.restype = wintypes.BOOL
_FillConsoleOutputAttribute = windll.kernel32.FillConsoleOutputAttribute
_FillConsoleOutputAttribute.argtypes = [
wintypes.HANDLE,
wintypes.WORD,
wintypes.DWORD,
COORD,
POINTER(wintypes.DWORD),
]
_FillConsoleOutputAttribute.restype = wintypes.BOOL
handles = {
STDOUT: _GetStdHandle(STDOUT),
STDERR: _GetStdHandle(STDERR),
}
def GetConsoleScreenBufferInfo(stream_id=STDOUT):
handle = handles[stream_id]
csbi = CONSOLE_SCREEN_BUFFER_INFO()
success = _GetConsoleScreenBufferInfo(
handle, byref(csbi))
return csbi
def SetConsoleTextAttribute(stream_id, attrs):
handle = handles[stream_id]
return _SetConsoleTextAttribute(handle, attrs)
def SetConsoleCursorPosition(stream_id, position):
position = COORD(*position)
# If the position is out of range, do nothing.
if position.Y <= 0 or position.X <= 0:
return
# Adjust for Windows' SetConsoleCursorPosition:
# 1. being 0-based, while ANSI is 1-based.
# 2. expecting (x,y), while ANSI uses (y,x).
adjusted_position = COORD(position.Y - 1, position.X - 1)
# Adjust for viewport's scroll position
sr = GetConsoleScreenBufferInfo(STDOUT).srWindow
adjusted_position.Y += sr.Top
adjusted_position.X += sr.Left
# Resume normal processing
handle = handles[stream_id]
return _SetConsoleCursorPosition(handle, adjusted_position)
def FillConsoleOutputCharacter(stream_id, char, length, start):
handle = handles[stream_id]
char = c_char(char)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
success = _FillConsoleOutputCharacterA(
handle, char, length, start, byref(num_written))
return num_written.value
def FillConsoleOutputAttribute(stream_id, attr, length, start):
''' FillConsoleOutputAttribute( hConsole, csbi.wAttributes, dwConSize, coordScreen, &cCharsWritten )'''
handle = handles[stream_id]
attribute = wintypes.WORD(attr)
length = wintypes.DWORD(length)
num_written = wintypes.DWORD(0)
# Note that this is hard-coded for ANSI (vs wide) bytes.
return _FillConsoleOutputAttribute(
handle, attribute, length, start, byref(num_written))
|
Python
| 0.000001 |
@@ -4059,16 +4059,25 @@
har(char
+.encode()
)%0A
|
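The one-token fix above exists because Python 3 separates text from bytes: ctypes.c_char accepts a single byte, not a one-character str, so the caller's str argument has to be encoded first. The behaviour in isolation:

import ctypes

ctypes.c_char(b'x')          # fine: c_char wants bytes
ctypes.c_char('x'.encode())  # the fix applied in the diff
# ctypes.c_char('x')         # TypeError on Python 3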
bb58564dc400e91c132e3a26532595ec9de73958
|
Create managers.py
|
managers.py
|
managers.py
|
Python
| 0.000001 |
@@ -0,0 +1,496 @@
+from django.db import models%0A%0A%0Aclass VisibilityManagerMixin(object):%0A %22%22%22%0A This manager should be used with a model that implements the Hideable%0A mixin.%0A %22%22%22%0A%0A def __init__(self, *args, **kwargs):%0A self.visible = kwargs.pop('visible', True)%0A super().__init__(*args, **kwargs)%0A%0A def get_queryset(self):%0A return super().get_queryset()%0A .filter(hidden__isnull=self.visible)%0A%0Aclass VisibilityManager(VisibilityManagerMixin, models.Manager):%0A pass%0A
|
|
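Decoded, the get_queryset body in the diff above continues .filter(...) on a new line with no enclosing parentheses or backslash, which is a SyntaxError as committed. A working version of the mixin would read roughly:

from django.db import models

class VisibilityManagerMixin(object):
    """Expects a model with a nullable `hidden` field."""

    def __init__(self, *args, **kwargs):
        self.visible = kwargs.pop('visible', True)
        super().__init__(*args, **kwargs)

    def get_queryset(self):
        # wrap the chain in parentheses so the continuation line is legal
        return (super().get_queryset()
                .filter(hidden__isnull=self.visible))

class VisibilityManager(VisibilityManagerMixin, models.Manager):
    pass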
fbaaf3ba027ee9d18df7b1f48533c8847f084381
|
Add harmonic mean estimator.
|
harmonicmean.py
|
harmonicmean.py
|
Python
| 0.000003 |
@@ -0,0 +1,2691 @@
+import numpy.random%0Aimport numpy as np%0Aimport lib%0Afrom math import log%0A%0A%0Adef compute_harmonicmean(lnlike_post, posterior_sample=None, lnlikefunc=None,%0A lnlikeargs=(), **kwargs):%0A %22%22%22%0A Computes the harmonic mean estimate of the marginal likelihood.%0A%0A The estimation is based on n posterior samples%0A (indexed by s, with s = 0, ..., n-1), but can be done directly if the%0A log(likelihood) in this sample is passed.%0A%0A :param array lnlike_post:%0A log(likelihood) computed over a posterior sample. 1-D array of length n.%0A If an emply array is given, then compute from posterior sample.%0A%0A :param array posterior_sample:%0A A sample from the parameter posterior distribution.%0A Dimensions are (n x k), where k is the number of parameters. If None%0A the computation is done using the log(likelihood) obtained from the%0A posterior sample.%0A%0A :param callable lnlikefunc:%0A Function to compute ln(likelihood) on the marginal samples.%0A%0A :param tuple lnlikeargs:%0A Extra arguments passed to the likelihood function.%0A%0A Other parameters%0A ----------------%0A :param int size:%0A Size of sample to use for computation. If none is given, use size of%0A given array or posterior sample.%0A%0A References%0A ----------%0A Kass & Raftery (1995), JASA vol. 90, N. 430, pp. 773-795%0A %22%22%22%0A%0A if len(lnlike_post) == 0 and posterior_sample is not None:%0A%0A samplesize = kwargs.pop('size', len(posterior_sample))%0A%0A if samplesize %3C len(posterior_sample):%0A posterior_subsample = numpy.random.choice(posterior_sample,%0A size=samplesize,%0A replace=False)%0A else:%0A posterior_subsample = posterior_sample.copy()%0A%0A # Compute log likelihood in posterior sample.%0A log_likelihood = lnlikefunc(posterior_subsample, *lnlikeargs)%0A%0A elif len(lnlike_post) %3E 0:%0A samplesize = kwargs.pop('size', len(lnlike_post))%0A log_likelihood = numpy.random.choice(lnlike_post, size=samplesize,%0A replace=False)%0A%0A # Use identity for summation%0A # http://en.wikipedia.org/wiki/List_of_logarithmic_identities#Summation.2Fsubtraction%0A # ln(sum(x)) = ln(x%5B0%5D) + ln(1 + sum( exp( ln(x%5B1:%5D) - ln(x%5B0%5D) ) ) )%0A%0A hme = -lib.log_sum(-log_likelihood) + log(len(log_likelihood))%0A%0A return hme%0A%0A%0Adef run_hme_mc(log_likelihood, nmc, samplesize):%0A hme = np.zeros(nmc)%0A for i in range(nmc):%0A hme%5Bi%5D = compute_harmonicmean(log_likelihood, size=samplesize)%0A%0A return hme%0A%0A__author__ = 'Rodrigo F. Diaz'
|
|
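compute_harmonicmean above leans on an external lib.log_sum helper. Using the log-sum-exp identity its comment cites, a self-contained stand-in could be:

import numpy as np

def log_sum(log_x):
    """Return log(sum(x)) given log(x), shifted by the max for numerical stability."""
    log_x = np.asarray(log_x)
    a = log_x.max()
    return a + np.log(np.exp(log_x - a).sum())

# ln(HME) = ln(n) - log_sum(-lnL), matching the estimator above
log_likelihood = np.array([-10.2, -11.5, -9.8])
hme = -log_sum(-log_likelihood) + np.log(len(log_likelihood))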
156a31c7aef3dfc07f5e3b0998b0957308abdd16
|
Create getPrice.py
|
src/getPrice.py
|
src/getPrice.py
|
Python
| 0.000001 |
@@ -0,0 +1,939 @@
+import requests%0Aimport requests.auth%0Aimport time%0Aimport json%0A%0Adef getPrices():%0A print %22Grabbing price...%22%0A dogeprice = parsePrices(%22doge%22)%0A btcprice = parsePrices(%22btc%22)%0A ltcprice = parsePrices(%22ltc%22)%0A rddprice = parsePrices(%22rdd%22)%0A obj3 = open('price.txt', 'w')%0A obj3.write(str(dogeprice) + %22%5Cn%22 + str(btcprice) + '%5Cn' + str(ltcprice) + '%5Cn' + str(rddprice))%0A obj3.close()%0A print 'Done'%0Adef parsePrices(currency):%0A code = requests.get('http://coinmarketcap.northpole.ro/api/' + currency + '.json')%0A json_input = code.json()%0A decoded = json.dumps(json_input)%0A decoded = json.loads(decoded)%0A price = decoded%5B'price'%5D%0A price = float(price)%0A price = 1.3 * 4 / price %0A price = round(price,7)%0A return price%0Awhile True:%0A getPrices()%0A for x in range(2700,-1,-1):%0A print x%0A x+=1%0A time.sleep(1)%0A
|
|
082f11f5a24efd21f05b1d7cc7f1b1f1ab91fb0c
|
Add exercise 13: couplage
|
prologin-2014/13_couplage.py
|
prologin-2014/13_couplage.py
|
Python
| 0.001185 |
@@ -0,0 +1,1645 @@
+# http://www.prologin.org/training/challenge/demi2014/couplage%0Afrom sys import stdin%0A%0AnbBowlsFirst = int(stdin.readline())%0AnbBowlsSecond = int(stdin.readline())%0AbowlsFirst = %5Bint(x) for x in stdin.readline().split()%5D%0AbowlsSecond = %5Bint(x) for x in stdin.readline().split()%5D%0A%0Adef maxInTwoLists(first, second):%0A%09%22%22%22Find the max value present in two lists%22%22%22%0A%09maxFirst = max(first)%0A%09maxSecond = max(second)%0A%0A%09if (maxFirst == maxSecond):%0A%09%09return maxFirst%0A%09elif (maxFirst %3C maxSecond):%0A%09%09second.remove(maxSecond)%0A%09%09return maxInTwoLists(first, second)%0A%09else:%0A%09%09first.remove(maxFirst)%0A%09%09return maxInTwoLists(first, second)%0A%0Adef optimize(acc, first, second):%0A%09# If a list is empty, stop here%0A%09if len(first) == 0 or len(second) == 0:%0A%09%09return acc%0A%0A%09# Try to reach the max value in these lists%0A%09maxValue = maxInTwoLists(first, second)%0A%0A%09# If we have matching bowls before the maxValue, count them%0A%09for i in range(min(first.index(maxValue), second.index(maxValue))):%0A%09%09if (first%5Bi%5D == second%5Bi%5D):%0A%09%09%09return optimize(acc + first%5Bi%5D, first%5Bi+1:%5D, second%5Bi+1:%5D)%0A%0A%09# Determine the index of the maxValue in both lists%0A%09firstIndex = first.index(maxValue)%0A%09secondIndex = second.index(maxValue)%0A%0A%09# Maybe it would be better to not reach this maxValue.%0A%09# Delete it from the first list and try that%0A%09firstWithoutMax = list(first)%0A%09firstWithoutMax.remove(maxValue)%0A%0A%09return max(%0A%09%09# Go straight to the maxValue in both lists and continue with tails%0A%09%09optimize(acc + maxValue, first%5BfirstIndex+1:%5D, second%5BsecondIndex+1:%5D),%0A%09%09# Maybe it would be better to not reach this maximum%0A%09%09optimize(acc, firstWithoutMax, second)%0A%09)%0A%0Aprint optimize(0, bowlsFirst, bowlsSecond)
|
|
f8a0aa92c8e19bc11f8a609733644afe0efed5c8
|
Update test script to do match testing.
|
decompose_test.py
|
decompose_test.py
|
Python
| 0 |
@@ -0,0 +1,1663 @@
+from util.decompose_graph import decompose_graph%0A%0Afrom core.himesis_utils import expand_graph, set_do_pickle, set_compression%0A%0Aset_do_pickle(True)%0Aset_compression(6)%0A%0Afile_name = %22226482067288742734644994685633991185819%22%0A%0Agraph = expand_graph(file_name)%0A%0Aprint(graph.name)%0A%0Afrom core.himesis_utils import load_directory%0Acontracts = load_directory(%22mbeddr2C_MM/Contracts/%22)%0A%0Aatomic_contracts = %5B%0A 'AssignmentInstance'%0A%5D%0A%0Aif_then_contracts = %5B%5D%0Aprop_if_then_contracts = %5B%5D%0A%0Afrom core.himesis_utils import graph_to_dot, load_directory%0Afrom util.test_script_utils import select_rules, get_sub_and_super_classes,%5C%0A load_transformation, changePropertyProverMetamodel, set_supertypes, load_contracts%0Afrom util.slicer import Slicer%0Afrom util.parser import load_parser%0AinputMM = %22./mbeddr2C_MM/ecore_metamodels/Module.ecore%22%0AoutputMM = %22./mbeddr2C_MM/ecore_metamodels/C.ecore%22%0Asubclasses_dict, superclasses_dict = get_sub_and_super_classes(inputMM, outputMM)%0A%0Aatomic_contracts, if_then_contracts = load_contracts(contracts, superclasses_dict,%0A atomic_contracts, if_then_contracts,%0A prop_if_then_contracts,%0A False)%0A%0Acontract =atomic_contracts%5B0%5D%5B1%5D%0Aprint(contract)%0Aprint(contract.has_pivots())%0A%0A#graph_to_dot(graph.name, graph, force_trace_links = True)%0A%0Aimport time%0A%0Aprint(%22Starting to check%22)%0Astart_time = time.time()%0Aresult = contract.check(graph)%0Aprint(result)%0Aprint(%22Finished in %22 + str(time.time() - start_time) + %22 seconds%22)%0A%0A%0A#decompose_graph(graph)
|
|
8c98e313caeb82ee710d56399d5de7cf1eb1f7df
|
Add DNA Coding
|
python/src/dna/dna_coding.py
|
python/src/dna/dna_coding.py
|
Python
| 0.000595 |
@@ -0,0 +1,1083 @@
+import pandas as pd%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0Aimport src.mylib.mfile as mfile%0Afrom matplotlib import style%0A%0Astop =int('101010', 2) #101010 I Ching 63 After Completion%0Abefo =int('010101', 2) #101010 I Ching 64 Before Completion%0A%0Aguai =int('111110', 2) #101010 I Ching 43%0A%0Aqian =int('111111', 2) #101010 I Ching 01%0Akun =int('000000', 2) #101010 I Ching 02%0A%0Adf = mfile.loadOneSymbol(%22JPY=X%22, %22../db/forex.db%22)%0Adf = df.reset_index(drop=True)%0Adf = df%5B'Close'%5D%0Adf = df%5B-1000:%5D%0Adf = df.diff()%0Adf = df.dropna()%0Afn = lambda x: (1.0 if x %3E 0 else 0.0)%0Axx = df.apply(fn)%0Axx = xx.values%0Aln = len(xx)%0Asz = (ln // 6) * 6%0Axx = xx%5B:sz%5D%0Aprint(xx)%0A#L0 = xx%5B:-2%5D%0A#L1 = xx%5B1:-1%5D%0A#L2 = xx%5B2:%5D%0A#yy = L0 * 4 + L1 * 2 + L2%0Adef my_func(arr, num):%0A sum = 0%0A for i in range(num):%0A sum += arr%5Bi%5D * (2**(num-i-1))%0A return sum%0Axx = np.reshape(xx, (-1, 6))%0Ayy = np.apply_along_axis(my_func, 1, xx, 6)%0Ai, = np.where(yy == stop)%0Azz = np.copy(yy)%0Azz%5Bzz != stop%5D = np.nan%0Ass = yy%0Asp = range(0, len(ss))%0Astyle.use('ggplot')%0Aplt.plot(ss)%0Aplt.plot(zz, 'bo')%0Aprint(ss)%0Aplt.show()%0A
|
|
1be041fd9bfc856fd59fba52501823d80d3ff037
|
Create setup.py
|
neutron/setup.py
|
neutron/setup.py
|
Python
| 0.000001 |
@@ -0,0 +1 @@
+%0A
|
|
a4f030cf10683aa949550e9922c3ea72421cf392
|
Update PCI configuration options
|
nova/conf/pci.py
|
nova/conf/pci.py
|
# needs:check_opt_group_and_type
# Copyright (c) 2013 Intel, Inc.
# Copyright (c) 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
pci_opts = [
cfg.MultiStrOpt('pci_alias',
default=[],
help="""
An alias for a PCI passthrough device requirement.
This allows users to specify the alias in the extra_spec for a flavor, without
needing to repeat all the PCI property requirements.
Possible Values:
* A list of JSON values which describe the aliases. For example:
pci_alias = {
"name": "QuickAssist",
"product_id": "0443",
"vendor_id": "8086",
"device_type": "type-PCI"
}
defines an alias for the Intel QuickAssist card. (multi valued). Valid key
values are :
* "name"
* "product_id"
* "vendor_id"
* "device_type"
"""),
cfg.MultiStrOpt('pci_passthrough_whitelist',
default=[],
help="""
White list of PCI devices available to VMs.
Possible values:
* A JSON dictionary which describe a whitelisted PCI device. It should take
the following format:
["device_id": "<id>",] ["product_id": "<id>",]
["address": "[[[[<domain>]:]<bus>]:][<slot>][.[<function>]]" |
"devname": "PCI Device Name",]
{"tag": "<tag_value>",}
where '[' indicates zero or one occurrences, '{' indicates zero or multiple
occurrences, and '|' mutually exclusive options. Note that any missing
fields are automatically wildcarded. Valid examples are:
pci_passthrough_whitelist = {"devname":"eth0",
"physical_network":"physnet"}
pci_passthrough_whitelist = {"address":"*:0a:00.*"}
pci_passthrough_whitelist = {"address":":0a:00.",
"physical_network":"physnet1"}
pci_passthrough_whitelist = {"vendor_id":"1137",
"product_id":"0071"}
pci_passthrough_whitelist = {"vendor_id":"1137",
"product_id":"0071",
"address": "0000:0a:00.1",
"physical_network":"physnet1"}
The following are invalid, as they specify mutually exclusive options:
pci_passthrough_whitelist = {"devname":"eth0",
"physical_network":"physnet",
"address":"*:0a:00.*"}
* A JSON list of JSON dictionaries corresponding to the above format. For
example:
pci_passthrough_whitelist = [{"product_id":"0001", "vendor_id":"8086"},
{"product_id":"0002", "vendor_id":"8086"}]
""")
]
def register_opts(conf):
conf.register_opts(pci_opts)
def list_opts():
# TODO(sfinucan): This should be moved into the PCI group and
# oslo_config.cfg.OptGroup used
return {'DEFAULT': pci_opts}
|
Python
| 0.000002 |
@@ -1330,59 +1330,246 @@
ame%22
-%0A * %22product_id%22%0A * %22vendor_id%22%0A * %22device_type%22
+: Name of the PCI alias.%0A * %22product_id%22: Product ID of the device in hexadecimal.%0A * %22vendor_id%22: Vendor ID of the device in hexadecimal.%0A * %22device_type%22: Type of PCI device. Valid values are: %22type-PCI%22,%0A %22type-PF%22 and %22type-VF%22.
%0A%22%22%22
@@ -1827,22 +1827,22 @@
%0A%0A %5B%22
-device
+vendor
_id%22: %22%3C
@@ -1960,23 +1960,14 @@
%22: %22
-PCI Device N
+%3Cn
ame
+%3E
%22,%5D%0A
@@ -1972,19 +1972,21 @@
%5D%0A %7B%22
+%3C
tag
+%3E
%22: %22%3Ctag
@@ -1999,17 +1999,17 @@
%3E%22,%7D%0A%0A
-w
+W
here '%5B'
@@ -2186,16 +2186,427 @@
dcarded.
+%0A%0A Valid key values are :%0A%0A * %22vendor_id%22: Vendor ID of the device in hexadecimal.%0A * %22product_id%22: Product ID of the device in hexadecimal.%0A * %22address%22: PCI address of the device.%0A * %22devname%22: Device name of the device (for e.g. interface name). Not all%0A PCI devices have a name.%0A * %22%3Ctag%3E%22: Additional %3Ctag%3E and %3Ctag_value%3E used for matching PCI devices.%0A Supported %3Ctag%3E: %22physical_network%22.%0A%0A
Valid e
|
08122e57235e836dbfd4230e9e3ad3f7c54072ff
|
add simple debug callback test case
|
pycurl/tests/test_debug.py
|
pycurl/tests/test_debug.py
|
Python
| 0 |
@@ -0,0 +1,209 @@
+# $Id$%0A%0Aimport pycurl%0A%0Adef test(**args):%0A print args%0A%0Ac = pycurl.init()%0Ac.setopt(pycurl.URL, 'http://curl.haxx.se/')%0Ac.setopt(pycurl.VERBOSE, 1)%0Ac.setopt(pycurl.DEBUGFUNCTION, test)%0Ac.perform()%0Ac.cleanup()%0A
|
|
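The test above targets a very old pycurl API; current pycurl spells the constructor pycurl.Curl() and the teardown close(). An updated sketch of the same debug-callback check:

import pycurl

def debug(debug_type, debug_msg):
    # DEBUGFUNCTION receives an info type and the raw message
    print(debug_type, debug_msg)

c = pycurl.Curl()
c.setopt(pycurl.URL, 'http://curl.haxx.se/')
c.setopt(pycurl.VERBOSE, 1)
c.setopt(pycurl.DEBUGFUNCTION, debug)
c.perform()
c.close()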
b70f16c8c9c967f64d45937fc057f0b3c09fd912
|
test updates
|
searx/tests/test_webapp.py
|
searx/tests/test_webapp.py
|
# -*- coding: utf-8 -*-
import json
from urlparse import ParseResult
from mock import patch
from searx import webapp
from searx.testing import SearxTestCase
class ViewsTestCase(SearxTestCase):
def setUp(self):
webapp.app.config['TESTING'] = True # to get better error messages
self.app = webapp.app.test_client()
# set some defaults
self.test_results = [
{
'content': 'first test content',
'title': 'First Test',
'url': 'http://first.test.xyz',
'engines': ['youtube', 'startpage'],
'engine': 'startpage',
'parsed_url': ParseResult(scheme='http', netloc='first.test.xyz', path='/', params='', query='', fragment=''), # noqa
}, {
'content': 'second test content',
'title': 'Second Test',
'url': 'http://second.test.xyz',
'engines': ['youtube', 'startpage'],
'engine': 'youtube',
'parsed_url': ParseResult(scheme='http', netloc='second.test.xyz', path='/', params='', query='', fragment=''), # noqa
},
]
self.maxDiff = None # to see full diffs
def test_index_empty(self):
result = self.app.post('/')
self.assertEqual(result.status_code, 200)
self.assertIn('<div class="title"><h1>searx</h1></div>', result.data)
@patch('searx.webapp.do_search')
def test_index_html(self, search):
search.return_value = (
self.test_results,
set()
)
result = self.app.post('/', data={'q': 'test'})
self.assertIn(
'<h3 class="result_title"><a href="http://first.test.xyz">First <b>Test</b></a></h3>', # noqa
result.data
)
self.assertIn(
'<p class="content">first <b>test</b> content<br /></p>',
result.data
)
@patch('searx.webapp.do_search')
def test_index_json(self, search):
search.return_value = (
self.test_results,
set()
)
result = self.app.post('/', data={'q': 'test', 'format': 'json'})
result_dict = json.loads(result.data)
self.assertEqual('test', result_dict['query'])
self.assertEqual(
result_dict['results'][0]['content'], 'first test content')
self.assertEqual(
result_dict['results'][0]['url'], 'http://first.test.xyz')
@patch('searx.webapp.do_search')
def test_index_csv(self, search):
search.return_value = (
self.test_results,
set()
)
result = self.app.post('/', data={'q': 'test', 'format': 'csv'})
self.assertEqual(
'title,url,content,host,engine,score\r\n'
'First Test,http://first.test.xyz,first test content,first.test.xyz,startpage,\r\n' # noqa
'Second Test,http://second.test.xyz,second test content,second.test.xyz,youtube,\r\n', # noqa
result.data
)
@patch('searx.webapp.do_search')
def test_index_rss(self, search):
search.return_value = (
self.test_results,
set()
)
result = self.app.post('/', data={'q': 'test', 'format': 'rss'})
self.assertIn(
'<description>Search results for "test" - searx</description>',
result.data
)
self.assertIn(
'<opensearch:totalResults>2</opensearch:totalResults>',
result.data
)
self.assertIn(
'<title>First Test</title>',
result.data
)
self.assertIn(
'<link>http://first.test.xyz</link>',
result.data
)
self.assertIn(
'<description>first test content</description>',
result.data
)
def test_about(self):
result = self.app.get('/about')
self.assertEqual(result.status_code, 200)
self.assertIn('<h1>About <a href="/">searx</a></h1>', result.data)
def test_preferences(self):
result = self.app.get('/preferences')
self.assertEqual(result.status_code, 200)
self.assertIn(
'<form method="post" action="/preferences" id="search_form">',
result.data
)
self.assertIn(
'<legend>Default categories</legend>',
result.data
)
self.assertIn(
'<legend>Interface language</legend>',
result.data
)
def test_stats(self):
result = self.app.get('/stats')
self.assertEqual(result.status_code, 200)
self.assertIn('<h2>Engine stats</h2>', result.data)
def test_robots_txt(self):
result = self.app.get('/robots.txt')
self.assertEqual(result.status_code, 200)
self.assertIn('Allow: /', result.data)
def test_opensearch_xml(self):
result = self.app.get('/opensearch.xml')
self.assertEqual(result.status_code, 200)
self.assertIn('<Description>Search searx</Description>', result.data)
def test_favicon(self):
result = self.app.get('/favicon.ico')
self.assertEqual(result.status_code, 200)
|
Python
| 0 |
@@ -1749,17 +1749,41 @@
st %3C
-b
+span class=%22highlight%22
%3ETest%3C/
-b
+span
%3E%3C/a
@@ -1899,17 +1899,41 @@
st %3C
-b
+span class=%22highlight%22
%3Etest%3C/
-b
+span
%3E co
|
6e42855d527976dd8b1cdb272502ce3aa76f8c6e
|
Add dbee abstract base class.
|
dbeekeeper/dbee/base.py
|
dbeekeeper/dbee/base.py
|
Python
| 0 |
@@ -0,0 +1,4068 @@
+# Copyright 2013 VMware, Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0A%0Aimport abc%0A%0A%0Aclass Base(Exception):%0A %22%22%22Abstract base class for dbeekeeper local storage, or 'dbee'.%0A%0A A dbee instance must be accessed from a single thread.%0A%0A Dbee transactions must be idempotent. Much like ZooKeeper snapshots, dbee%0A snapshots are 'fuzzy', meaning that transactions that were executed during%0A snapshotting may or may not be included in the snapshot. During recovery,%0A dbeekeeper executes all the transactions since the beginning of the%0A snapshot it's recoverying from in the same order they were applied%0A originally.%0A %22%22%22%0A%0A __metaclass__ = abc.ABCMeta%0A%0A @abc.abstractmethod%0A def execute(self, transaction):%0A %22%22%22Execute a transaction.%0A%0A This method is *not* responsible for persisting transaction to disk.%0A The caller must maintain a transaction log until it takes a snapshot.%0A%0A Args:%0A transaction: transaction to execute in string.%0A%0A Returns:%0A None%0A%0A Raises:%0A dbeekeeper.DbeeError:%0A DbeeError is considered fatal since it might affet consistency%0A of dbee. When dbee throws a DbeeError, dbeekeeper goes into%0A recovery mode.%0A dbeekeeper.ClientError:%0A ClientError is *not* considered fatal since it does not affect%0A consistency of dbee. Dbeekeeper simply pass ClientErrors back%0A to the client.%0A %22%22%22%0A%0A @abc.abstractmethod%0A def snapshot(self, filename, callback):%0A %22%22%22Take a snapshot of this dbee asynchronously.%0A%0A This method must not block. It should initiate snapshotting in a%0A separate thread/process and return without waiting for the snapshotting%0A to finish. Dbee must reject any other incoming snapshot/restore%0A requests during the snapshot by raising a ClientError.%0A%0A The resulting snapshot must contain all the transactions this dbee%0A successfully executed before the snapshot() was called. For incoming%0A execute requests during the snapshot, dbee must either:%0A%0A a. Block them until the snapshotting finishes.%0A b. Accept the transactions. These transactions may or may not be in the%0A resulting snapshot. It is the caller's responsibility to maintain%0A a log for these transactions until the next snapshot() call finishes%0A successfully.%0A%0A Args:%0A filename: filename to use for the snapshot.%0A callback: function to call when the snapshotting completes. This%0A function must take 2 arguemnts, error and filename. If%0A snapshotting succeeded, the first argument is set to None%0A and the second argument is a string that contains%0A filename of the resulting snapshot. If snapshotting%0A failed, the first argument is an Exception and the second%0A argument is set to None.%0A%0A Returns:%0A None%0A%0A Raises:%0A This method must not raise any dbeekeeper error. All the dbeekeeper%0A errors must be passed in the callback%0A %22%22%22%0A%0A @abc.abstractmethod%0A def restore(self, filename):%0A %22%22%22Restore dbee from a snapshot.%0A%0A This method must block until the restore operation completes.%0A%0A Args:%0A filename: Snapshot file to restore from.%0A%0A Returns:%0A None%0A%0A Raises:%0A dbeekeeper.DbeeError:%0A %22%22%22%0A
|
|
7e96013f21bbb5003b30da1e04833dcf58650602
|
Implement a ThriftHandler for tornado
|
freenoted/tasks/tornado_thrift.py
|
freenoted/tasks/tornado_thrift.py
|
Python
| 0.000517 |
@@ -0,0 +1,595 @@
+from __future__ import absolute_import%0A%0Aimport tornado.web%0Afrom thrift.transport.TTransport import TMemoryBuffer%0Afrom thrift.protocol.TBinaryProtocol import TBinaryProtocol%0A%0A%0Aclass TornadoThriftHandler(tornado.web.RequestHandler):%0A def initialize(self, processor):%0A self.processor = processor%0A %0A def post(self):%0A iprot = TBinaryProtocol(TMemoryBuffer(self.request.body))%0A oprot = TBinaryProtocol(TMemoryBuffer())%0A self.processor.process(iprot, oprot)%0A self.set_header('Content-Type', 'application/x-thrift')%0A self.write(oprot.trans.getvalue())%0A
|
|
c66e64556747736c1ee7461aa6ee8780a330481b
|
add sparse_to_dense_test
|
caffe2/python/sparse_to_dense_test.py
|
caffe2/python/sparse_to_dense_test.py
|
Python
| 0.000006 |
@@ -0,0 +1,3021 @@
+# Copyright (c) 2016-present, Facebook, Inc.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A##############################################################################%0A%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0Afrom __future__ import unicode_literals%0Afrom caffe2.python import core, workspace%0Afrom caffe2.python.test_util import TestCase%0A%0Aimport numpy as np%0A%0A%0Aclass TestSparseToDense(TestCase):%0A def test_sparse_to_dense(self):%0A op = core.CreateOperator(%0A 'SparseToDense',%0A %5B'indices', 'values'%5D,%0A %5B'output'%5D)%0A workspace.FeedBlob(%0A 'indices',%0A np.array(%5B2, 4, 999, 2%5D, dtype=np.int32))%0A workspace.FeedBlob(%0A 'values',%0A np.array(%5B1, 2, 6, 7%5D, dtype=np.int32))%0A%0A workspace.RunOperatorOnce(op)%0A output = workspace.FetchBlob('output')%0A print(output)%0A%0A expected = np.zeros(1000, dtype=np.int32)%0A expected%5B2%5D = 1 + 7%0A expected%5B4%5D = 2%0A expected%5B999%5D = 6%0A%0A self.assertEqual(output.shape, expected.shape)%0A np.testing.assert_array_equal(output, expected)%0A%0A def test_sparse_to_dense_invalid_inputs(self):%0A op = core.CreateOperator(%0A 'SparseToDense',%0A %5B'indices', 'values'%5D,%0A %5B'output'%5D)%0A workspace.FeedBlob(%0A 'indices',%0A np.array(%5B2, 4, 999, 2%5D, dtype=np.int32))%0A workspace.FeedBlob(%0A 'values',%0A np.array(%5B1, 2, 6%5D, dtype=np.int32))%0A%0A with self.assertRaises(RuntimeError):%0A workspace.RunOperatorOnce(op)%0A%0A def test_sparse_to_dense_with_data_to_infer_dim(self):%0A op = core.CreateOperator(%0A 'SparseToDense',%0A %5B'indices', 'values', 'data_to_infer_dim'%5D,%0A %5B'output'%5D)%0A workspace.FeedBlob(%0A 'indices',%0A np.array(%5B2, 4, 999, 2%5D, dtype=np.int32))%0A workspace.FeedBlob(%0A 'values',%0A np.array(%5B1, 2, 6, 7%5D, dtype=np.int32))%0A workspace.FeedBlob(%0A 'data_to_infer_dim',%0A np.array(np.zeros(1500, ), dtype=np.int32))%0A%0A workspace.RunOperatorOnce(op)%0A output = workspace.FetchBlob('output')%0A print(output)%0A%0A expected = np.zeros(1500, dtype=np.int32)%0A expected%5B2%5D = 1 + 7%0A expected%5B4%5D = 2%0A expected%5B999%5D = 6%0A%0A self.assertEqual(output.shape, expected.shape)%0A np.testing.assert_array_equal(output, expected)%0A
|
|
29e8dce6fc2956dc9f942eca41fdb632c382fe8e
|
Create pylsy.py
|
pylsy/tests/pylsy.py
|
pylsy/tests/pylsy.py
|
Python
| 0.000001 |
@@ -0,0 +1,3050 @@
+# -*- coding: utf-8 -*-%0A%0Afrom __future__ import print_function%0A%0A%0Aclass PylsyTable(object):%0A%0A def __init__(self, attributes):%0A self.StrTable = %22%22%0A self.Attributes = attributes%0A self.Table = %5B%5D%0A self.AttributesLength = %5B%5D%0A self.Cols_num = len(self.Attributes)%0A self.Lines_num = 0%0A for attribute in self.Attributes:%0A col = dict()%0A col%5Battribute%5D = %22%22%0A self.Table.append(col)%0A%0A def print_divide(self):%0A for space in self.AttributesLength:%0A self.StrTable += %22+ %22%0A for sign in range(space):%0A self.StrTable += %22- %22%0A self.StrTable += %22+%22+%22%5Cn%22%0A%0A def add_data(self, attribute, values):%0A for col in self.Table:%0A if attribute in col:%0A dict_values = %5Bstr(value) for value in values%5D%0A col%5Battribute%5D = dict_values%0A%0A def create_table(self):%0A for col in self.Table:%0A values = list(col.values())%5B0%5D%0A if self.Lines_num %3C len(values):%0A self.Lines_num = len(values)%0A # find the length of longest word in current column%0A%0A key_length = len(list(col.keys())%5B0%5D)%0A for value in values:%0A length = len(value)%0A if length %3E key_length:%0A key_length = length%0A self.AttributesLength.append(key_length)%0A self.print_head()%0A self.print_value()%0A%0A def print_head(self):%0A self.print_divide()%0A self.StrTable += %22%7C %22%0A for spaces, attr in zip(self.AttributesLength, self.Attributes):%0A space_num = spaces * 2 - 1%0A start = (space_num - len(attr)) // 2%0A for space in range(start):%0A self.StrTable += %22 %22%0A self.StrTable += attr+' '%0A end = space_num - start - len(attr)%0A for space in range(end):%0A self.StrTable += %22 %22%0A self.StrTable += %22%7C %22%0A self.StrTable += %22%22+'%5Cn'%0A self.print_divide()%0A%0A def print_value(self):%0A for line in range(self.Lines_num):%0A for col, length in zip(self.Table, self.AttributesLength):%0A self.StrTable += %22%7C %22%0A value_length = length * 2 - 1%0A value = list(col.values())%5B0%5D%0A if len(value) != 0:%0A start = (value_length - len(value%5Bline%5D)) // 2%0A for space in range(start):%0A self.StrTable += %22 %22%0A self.StrTable += value%5Bline%5D+' '%0A end = value_length - start - len(value%5Bline%5D)%0A for space in range(end):%0A self.StrTable += %22 %22%0A else:%0A start = 0%0A end = value_length - start + 1%0A for space in range(end):%0A self.StrTable += %22 %22%0A self.StrTable += %22%7C%22+'%5Cn'%0A self.print_divide()%0A%0A def __str__(self):%0A self.create_table()%0A return self.StrTable%0A
|
|
d60c1f9a6e56472611a96779462b42e8505e7905
|
Convert a PDF document to JPEG/PNG image via /pdftoimg endpoint
|
python/pdf_to_img.py
|
python/pdf_to_img.py
|
Python
| 0.999999 |
@@ -0,0 +1,463 @@
+import requests%0Aimport json%0A# Convert a PDF document to JPEG/PNG image via /pdftoimg endpoint - https://pixlab.io/cmd?id=pdftoimg%0Areq = requests.get('https://api.pixlab.io/pdftoimg',params=%7B%0A 'src':'https://www.getharvest.com/downloads/Invoice_Template.pdf',%0A 'export': 'jpeg',%0A 'key':'My_PixLab_Key'%0A%7D)%0Areply = req.json()%0Aif reply%5B'status'%5D != 200:%0A print (reply%5B'error'%5D)%0Aelse:%0A print (%22Link to the image output (Converted PDF page): %22+ reply%5B'link'%5D)%0A
|
|
198b54c9ff796cc98cccfdc530f0111739901b0d
|
Create base-7.py
|
Python/base-7.py
|
Python/base-7.py
|
Python
| 0.000022 |
@@ -0,0 +1,774 @@
+# Time: O(1)%0A# Space: O(1)%0A%0A# Given an integer, return its base 7 string representation.%0A#%0A# Example 1:%0A# Input: 100%0A# Output: %22202%22%0A# Example 2:%0A# Input: -7%0A# Output: %22-10%22%0A# Note: The input will be in range of %5B-1e7, 1e7%5D.%0A%0Aclass Solution(object):%0A def convertToBase7(self, num):%0A if num %3C 0: return '-' + self.convertToBase7(-num)%0A result = ''%0A while num:%0A result = str(num %25 7) + result%0A num //= 7%0A return result if result else '0'%0A%0A%0Aclass Solution2(object):%0A def convertToBase7(self, num):%0A %22%22%22%0A :type num: int%0A :rtype: str%0A %22%22%22%0A if num %3C 0: return '-' + self.convertToBase7(-num)%0A if num %3C 7: return str(num)%0A return self.convertToBase7(num // 7) + str(num %25 7)%0A
|
|
50dc018891511ce34b4177a43cfcd678456444cf
|
test of quasiisothermaldf's meanvR
|
nose/test_qdf.py
|
nose/test_qdf.py
|
Python
| 0 |
@@ -0,0 +1,827 @@
+# Tests of the quasiisothermaldf module%0Aimport numpy%0A#fiducial setup uses these%0Afrom galpy.potential import MWPotential%0Afrom galpy.actionAngle import actionAngleAdiabatic, actionAngleStaeckel%0Afrom galpy.df import quasiisothermaldf%0AaAA= actionAngleAdiabatic(pot=MWPotential,c=True)%0AaAS= actionAngleStaeckel(pot=MWPotential,c=True,delta=0.5)%0A%0Adef test_meanvR_adiabatic_gl():%0A qdf= quasiisothermaldf(1./4.,0.2,0.1,1.,1.,%0A pot=MWPotential,aA=aAA,cutcounter=True)%0A #In the mid-plane%0A assert numpy.fabs(qdf.meanvR(0.9,0.,gl=True)) %3C 0.01, %22qdf's meanvr is not equal to zero%22%0A #higher up%0A assert numpy.fabs(qdf.meanvR(0.9,0.2,gl=True)) %3C 0.01, %22qdf's meanvr is not equal to zero%22%0A assert numpy.fabs(qdf.meanvR(0.9,-0.25,gl=True)) %3C 0.01, %22qdf's meanvr is not equal to zero%22%0A return None%0A
|
|
660e53fa4505782a2d1484cc0b6e598edc851df0
|
Initialize P05_stylingExcel
|
books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py
|
books/AutomateTheBoringStuffWithPython/Chapter12/P05_stylingExcel.py
|
Python
| 0.000002 |
@@ -0,0 +1,351 @@
+# This program uses the OpenPyXL module to manipulate Excel documents%0A%0Aimport openpyxl%0Afrom openpyxl.styles import Font, Style%0A%0Awb = openpyxl.Workbook()%0Asheet = wb.get_sheet_by_name(%22Sheet%22)%0A%0Aitalic24Font = Font(size=24, italic=True)%0AstyleObj = Style(font=italic24Font)%0Asheet%5B%22A1%22%5D.style = styleObj%0Asheet%5B%22A1%22%5D = %22Hello world!%22%0Awb.save(%22styled.xlsx%22)%0A
|
|
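A caveat for readers running the snippet above today: the Style wrapper it imports was later removed from openpyxl, which now assigns style components straight onto the cell. A rough modern equivalent (assuming openpyxl 2.4 or newer):

import openpyxl
from openpyxl.styles import Font

wb = openpyxl.Workbook()
sheet = wb["Sheet"]  # replaces the deprecated get_sheet_by_name
sheet["A1"].font = Font(size=24, italic=True)
sheet["A1"] = "Hello world!"
wb.save("styled.xlsx")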
fb5ed0ea066c9bdb801a95e50d78529addffbed8
|
add twitter url to video URL email
|
dj/scripts/email_url.py
|
dj/scripts/email_url.py
|
#!/usr/bin/python
# email_url.py
# emails the video URL to the presenters
from email_ab import email_ab
class email_url(email_ab):
ready_state = 7
subject_template = "[{{ep.show.name}}] Video up: {{ep.name}}"
body_body = """
The video of your talk is posted:
{{url}}
Look at it, make sure the title is spelled right, let me know if it is OK.
If you are satisfied, tweet it, blog it, whatever it. No point in making videos if no one watches them.
To approve it click the Approve button at
http://veyepar.nextdayvideo.com/main/approve/{{ep.id}}/{{ep.slug}}/{{ep.edit_key}}/
As soon as you or someone approves your video, it will be tweeted on @NextDayVideo{% if ep.show.client.tweet_prefix %} tagged {{ep.show.client.tweet_prefix}}{% endif %}. It will also be sent to the event organizers in hopes that they add it to the event website.
"""
py_name = "email_url.py"
def more_context(self, ep):
# If there is a Richard (pyvideo) url, use that;
# else use the youtube url.
url = ep.public_url or ep.host_url
return {'url':url}
if __name__ == '__main__':
p=email_url()
p.main()
|
Python
| 0.000003 |
@@ -236,20 +236,16 @@
y = %22%22%22%0A
-
The vide
@@ -282,21 +282,43 @@
%7B%7Burl%7D%7D%0A
-%0A
+%7B%25 if ep.state == 7 %25%7D%0A
Look at
@@ -361,39 +361,42 @@
ight
-, let me know if it is OK.%0A
+ and the audio sounds reasonable.%0A
If y
@@ -500,17 +500,9 @@
em.%0A
- %0A
+%0A
To a
@@ -900,16 +900,204 @@
site. %0A
+ %7B%25 endif %25%7D%0A %7B%25 if ep.twitter_url %25%7D%0AIt has been tweeted: %7B%7B ep.twitter_url %7D%7D%0ARe-tweet it, blog it, whatever it. No point in making videos if no one watches them.%0A %7B%25 endif %25%7D%0A
%22%22%22
|
b47369d43a0a85ac2bc32bfa77c6a4d9074ce700
|
Add basic test case for retrieve_dns module
|
test/test_retrieve_dns.py
|
test/test_retrieve_dns.py
|
Python
| 0 |
@@ -0,0 +1,1606 @@
+import logging%0Aimport os%0Aimport tempfile%0Aimport unittest%0A%0Aimport mock%0A%0Aimport bin.retrieve_dns%0A%0A%0Alogging.basicConfig(level=logging.INFO)%0A%0A%0Aclass RetrieveDnsTestCase(unittest.TestCase):%0A%0A def setUp(self):%0A # Mock out logging%0A mock.patch('bin.retrieve_dns.set_up_logging', autospec=True).start()%0A%0A # Mock out config%0A mock_config = mock.patch('bin.retrieve_dns.get_config', autospec=True).start()%0A%0A # Mock out retrieving xml%0A self.mock_xml = mock.patch('bin.retrieve_dns.get_xml', autospec=True).start()%0A%0A # Set up temp files%0A self.files = %7B%7D%0A for item in ('dn', 'extra', 'ban'):%0A self.files%5Bitem%5D = dict(zip(('handle', 'path'), tempfile.mkstemp()))%0A os.write(self.files%5Bitem%5D%5B'handle'%5D, '/wobble')%0A for item in self.files.values():%0A os.close(item%5B'handle'%5D)%0A%0A # Set up config using temp files%0A c = bin.retrieve_dns.Configuration()%0A c.dn_file = self.files%5B'dn'%5D%5B'path'%5D%0A c.extra_dns = self.files%5B'extra'%5D%5B'path'%5D%0A c.banned_dns = self.files%5B'ban'%5D%5B'path'%5D%0A mock_config.return_value = c%0A%0A def test_basics(self):%0A self.mock_xml.return_value = %22%3CHOSTDN%3E/wibble%3C/HOSTDN%3E%22%0A bin.retrieve_dns.runprocess(%22fakefile%22, %22fakefile%22)%0A dns = open(self.files%5B'dn'%5D%5B'path'%5D)%0A self.assertEqual(dns.read(), '/wibble%5Cn')%0A dns.close()%0A%0A def tearDown(self):%0A # Delete temp files%0A for item in self.files.values():%0A os.remove(item%5B'path'%5D)%0A%0A mock.patch.stopall()%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
4f9aaa4809c0ff083393088e205a48c3197b46f2
|
add forgoten '\n' in `Usage` output
|
platform/platform-resources/src/launcher.py
|
platform/platform-resources/src/launcher.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
import sys
import os
import time
# see com.intellij.idea.SocketLock for the server side of this interface
RUN_PATH = u'$RUN_PATH$'
CONFIG_PATH = u'$CONFIG_PATH$'
args = []
skip_next = False
for i, arg in enumerate(sys.argv[1:]):
if arg == '-h' or arg == '-?' or arg == '--help':
print(('Usage:\n' +
' {0} -h |-? | --help\n' +
' {0} [-l|--line line] file[:line]\n' +
' {0} diff <left> <right>' +
' {0} merge <local> <remote> [base] <merged>').format(sys.argv[0]))
exit(0)
elif arg == 'diff' and i == 0:
args.append(arg)
elif arg == 'merge' and i == 0:
args.append(arg)
elif arg == '-l' or arg == '--line':
args.append(arg)
skip_next = True
elif skip_next:
args.append(arg)
skip_next = False
else:
if ':' in arg:
file_path, line_number = arg.rsplit(':', 1)
if line_number.isdigit():
args.append('-l')
args.append(line_number)
args.append(os.path.abspath(file_path))
else:
args.append(os.path.abspath(arg))
else:
args.append(os.path.abspath(arg))
def launch_with_port(port):
found = False
s = socket.socket()
s.settimeout(0.3)
try:
s.connect(('127.0.0.1', port))
except:
return False
while True:
try:
path_len = struct.unpack(">h", s.recv(2))[0]
path = s.recv(path_len)
if os.path.abspath(path) == os.path.abspath(CONFIG_PATH):
found = True
break
except:
break
if found:
if args:
cmd = "activate " + os.getcwd() + "\0" + "\0".join(args)
encoded = struct.pack(">h", len(cmd)) + cmd
s.send(encoded)
time.sleep(0.5) # don't close socket immediately
return True
return False
port = -1
try:
f = open(os.path.join(CONFIG_PATH, 'port'))
port = int(f.read())
except Exception:
type, value, traceback = sys.exc_info()
print('No IDE instance has been found. New one will be started.')
port = -1
if port == -1:
# SocketLock actually allows up to 50 ports, but the checking takes too long
for port in range(6942, 6942 + 10):
if launch_with_port(port):
exit()
else:
if launch_with_port(port):
exit()
if sys.platform == "darwin":
# OS X: RUN_PATH is *.app path
if len(args):
args.insert(0, "--args")
os.execvp("open", ["-a", RUN_PATH] + args)
else:
# unix common
bin_dir, bin_file = os.path.split(RUN_PATH)
os.execv(RUN_PATH, [bin_file] + args)
|
Python
| 0.003142 |
@@ -522,16 +522,18 @@
%3Cright%3E
+%5Cn
' +%0A
|
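The one-character diff above fixes a classic implicit-string-concatenation slip: adjacent string literals join with nothing between them, so the diff usage line ran straight into the merge line. The pitfall in isolation:

usage = ('  prog diff <left> <right>'     # missing '\n' here...
         '  prog merge <local> <remote>')
print(usage)  # ...prints one run-together line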
b97a9571478dc8c919e072734816b412dadc0da9
|
Add maths plugin
|
plugins/maths.py
|
plugins/maths.py
|
Python
| 0.000001 |
@@ -0,0 +1,823 @@
+import io%0Aimport unittest%0A%0Afrom sympy.parsing import sympy_parser%0A%0Aclass Plugin:%0A def on_command(self, bot, msg, stdin, stdout, reply):%0A expr = %22 %22.join(msg%5B%22args%22%5D%5B1:%5D)%0A expr = sympy_parser.parse_expr(expr)%0A print(expr.evalf(), file=stdout)%0A%0A def on_help(self):%0A return %22Perform maths expressions.%22%0A%0A%0Aclass Test(unittest.TestCase):%0A def setUp(self):%0A self.plugin = Plugin()%0A%0A def test_command(self):%0A for a in range(1, 1000, 50):%0A for b in range(1, 1000, 50):%0A stdout = io.StringIO()%0A self.plugin.on_command(None, %7B%22args%22: %5BNone, str(a) + %22*%22 + str(b)%5D%7D, None, stdout, None)%0A self.assertEqual(int(float(stdout.getvalue().strip())), a * b)%0A%0A def test_help(self):%0A self.assertTrue(self.plugin.on_help())%0A
|
|
8ab44294c0dd7b95102bfa1d9e8437067813cd0f
|
Add basic document parser
|
vc2xlsx/doc_parser.py
|
vc2xlsx/doc_parser.py
|
Python
| 0.000004 |
@@ -0,0 +1,1156 @@
+import parsley%0A%0Aclass Goto (object):%0A%09def __init__(self, x, y):%0A%09%09self.x = x%0A%09%09self.y = y%0A%0A%09def __repr__(self):%0A%09%09return %22Goto(%7B%7D, %7B%7D)%22.format(repr(self.x), repr(self.y))%0A%0Aclass Entry (object):%0A%09def __init__(self, value):%0A%09%09self.value = value%0A%0A%09def __repr__(self):%0A%09%09return %22Entry(%7B%7D)%22.format(repr(self.value))%0A%0Aclass Menu (object):%0A%09def __init__(self, command):%0A%09%09self.command = command%0A%0A%09def __repr__(self):%0A%09%09return %22Menu(%7B%7D)%22.format(repr(self.command))%0A%0A_grammar = parsley.makeGrammar(r%22%22%22%0Adocument = command*:c -%3E tuple(x for x in c if x)%0Acommand = goto_command %7C menu_command %7C entry_command %7C nl%0Agoto_command = '%3E' %3Cletter+%3E:x %3Cdigit+%3E:y (':' %7C nl) -%3E Goto(x, y)%0Aentry_command = %3C(letter %7C digit %7C '%22' %7C '%5C'' %7C '+' %7C '-' %7C '(' %7C '#' %7C '@') not_nl*%3E:value -%3E Entry(value)%0Amenu_command = '/' %3C(letter %7C '-') (letter %7C digit %7C '$' %7C '*')*%3E:command -%3E Menu(command)%0Anl = ('%5Cr'? '%5Cn' %7C '%5Cr') -%3E None%0Anot_nl = anything:x ?(x not in '%5Cr%5Cn') -%3E x%0A%22%22%22, globals())%0A%0Adef parse(value):%0A%09return _grammar(value.rstrip('%5C0%5Cr%5Cn%5Ct ')).document()%0A%0Aif __name__ == %22__main__%22:%0A%09import sys%0A%0A%09with open(sys.argv%5B1%5D) as f:%0A%09%09result = parse(f.read())%0A%09%09print(repr(result))%0A
|
|
01bcda4326dc0204798f268bb1c60f06526aaba3
|
add freebsd shadow module
|
salt/modules/freebsd_shadow.py
|
salt/modules/freebsd_shadow.py
|
Python
| 0 |
@@ -0,0 +1,1851 @@
+'''%0AManage the password database on FreeBSD systems%0A'''%0A%0A# Import python libs%0Aimport os%0Atry:%0A import pwd%0Aexcept ImportError:%0A pass%0A%0A# Import salt libs%0Aimport salt.utils%0A%0A%0Adef __virtual__():%0A return 'shadow' if __grains__.get('os', '') == 'FreeBSD' else False%0A%0A%0Adef info(name):%0A '''%0A Return information for the specified user%0A%0A CLI Example::%0A%0A salt '*' shadow.info root%0A '''%0A try:%0A data = pwd.getpwnam(name)%0A ret = %7B%0A 'name': data.pw_name,%0A 'passwd': data.pw_passwd if data.pw_passwd != '*' else '',%0A 'change': '',%0A 'expire': ''%7D%0A except KeyError:%0A return %7B%0A 'name': '',%0A 'passwd': '',%0A 'change': '',%0A 'expire': ''%7D%0A%0A # Get password aging info%0A cmd = 'pw user show %7B0%7D %7C cut -f6,7 -d:'.format(name)%0A try:%0A change, expire = __salt__%5B'cmd.run_all'%5D(cmd)%5B'stdout'%5D.split(':')%0A except ValueError:%0A pass%0A else:%0A ret%5B'change'%5D = change%0A ret%5B'expire'%5D = expire%0A%0A return ret%0A%0A%0Adef set_password(name, password):%0A '''%0A Set the password for a named user. The password must be a properly defined%0A hash. The password hash can be generated with this command:%0A%0A %60%60python -c %22import crypt; print crypt.crypt('password',%0A '$6$SALTsalt')%22%60%60%0A%0A %60%60SALTsalt%60%60 is the 8-character crpytographic salt. Valid characters in the%0A salt are %60%60.%60%60, %60%60/%60%60, and any alphanumeric character.%0A%0A Keep in mind that the $6 represents a sha512 hash, if your OS is using a%0A different hashing algorithm this needs to be changed accordingly%0A%0A CLI Example::%0A%0A salt '*' shadow.set_password root '$1$UYCIxa628.9qXjpQCjM4a..'%0A '''%0A __salt__%5B'cmd.run'%5D('pw user mod %7B0%7D -H 0'.format(name), stdin=password)%0A uinfo = info(name)%0A return uinfo%5B'passwd'%5D == password%0A
|
|
9dab373023fa6b7767cd7555a533161752205eda
|
Test a weighted affine solver.
|
scripts/0-weighted-affine.py
|
scripts/0-weighted-affine.py
|
Python
| 0.001095 |
@@ -0,0 +1,429 @@
+#!/usr/bin/python%0A%0Aimport sys%0A%0Asys.path.append('../lib')%0Aimport transformations%0A%0Av0 = %5B%5B0, 1031, 1031, 0%5D, %5B0, 0, 1600, 1600%5D%5D%0Av1 = %5B%5B675, 826, 826, 677%5D, %5B55, 52, 281, 277%5D%5D%0A#weights = %5B1.0, 1.0, 1.0, 1.0%5D%0Aweights = %5B0.1, 0.01, 0.1, 0.2%5D%0Aprint %22original%22%0Aprint transformations.affine_matrix_from_points(v0, v1, shear=False)%0Aprint %22weighted%22%0Aprint transformations.affine_matrix_from_points_weighted(v0, v1, weights, shear=False)%0A
|
|
260e0ef2bc37750dccea47d30110221c272e757a
|
Add script for automating analysis for all corpora
|
run_all_corpora.py
|
run_all_corpora.py
|
Python
| 0 |
@@ -0,0 +1,1180 @@
+import os%0Aimport argparse%0Aimport subprocess%0A%0Aparser = argparse.ArgumentParser()%0Aparser.add_argument(%22corpusdir%22, help = %22Path to the directory containing corpus directories%22)%0Aparser.add_argument(%22script%22, help = %22name of the script to be run%22)%0Aargs = parser.parse_args()%0A%0A## lists of corpora to skip%0A## and failed to run%0Askipped = %5B%5D%0Afailed = %5B%5D%0A%0A## first check that the script exists%0Aassert(os.path.isfile(args.script), %22%7B%7D should be a script that exists%22.format(args.script))%0A%0A## loop through files in the directory%0Afor corpus in os.listdir(args.corpusdir):%0A ## check if the file is actually a directory since that is the expected format for the%0A ## analysis scripts%0A if os.path.isdir(corpus):%0A if corpus in skipped:%0A continue%0A try:%0A print(%22Processing %7B%7D%22.format(corpus))%0A ## first reset the corpus%0A subprocess.call(%5B'python', 'reset_database.py', corpus%5D)%0A%0A ## run the script on the corpus%0A subprocess.call(%5B'python', args.script, corpus, %22-s%22%5D)%0A%0A except:%0A failed.append(corpus)%0A continue%0Aprint(%22Complete!%22)%0Aprint(%22Following corpora were not run: %7B%7D%22 failed)%0A
|
|
593941ec42918a389a348a5d35e8c5033bb34e73
|
Add 8ball plugin
|
plugins/ball8.py
|
plugins/ball8.py
|
Python
| 0 |
@@ -0,0 +1,1360 @@
+import random%0Afrom plugin import CommandPlugin, PluginException%0A%0A%0Aclass Ball8(CommandPlugin):%0A %22%22%22%0A 8ball command (by javipepe :))%0A %22%22%22%0A%0A def __init__(self, bot):%0A CommandPlugin.__init__(self, bot)%0A self.triggers = %5B'8ball'%5D%0A self.short_help = 'Ask me a question'%0A self.help = 'Ask me a question, I%5C'll decide what the answer should be. Based on https://en.wikipedia.org/wiki/Magic_8-Ball'%0A self.help_example = %5B'!8ball Is linux better than windows?'%5D%0A # %5E obviously yes.%0A%0A def on_command(self, event, response):%0A args = event%5B'text'%5D%0A if not args or not args.endswith('?'):%0A raise PluginException('Invalid argument! Ask me a question!')%0A else:%0A possible_answers = %5B'It is certain', 'It is decidedly so', 'Without a doubt', 'Yes, definitely', 'You may rely on it', 'As I see it, yes', 'Most likely', 'Outlook good', 'Yes', 'Signs point to yes', 'Reply hazy try again', 'Ask again later', 'Better not tell you now', 'Cannot predict now', 'Concentrate and ask again', 'Do%5C't count on it', 'My reply is no', 'My sources say no', 'Outlook not so good', 'Very doubtful'%5D%0A%0A response%5B'text'%5D = ':8ball: says *_%25s_*!' %25 random.choice(possible_answers)%0A self.bot.sc.api_call('chat.postMessage', **response)%0A
|
|
d22ca6dbf7e8aa98b0f580b7972e157894925365
|
Fix test output for combining filename and extension
|
tests/test_auto_moving.py
|
tests/test_auto_moving.py
|
import os
import shutil

from nose.tools import assert_equal

from .base import BaseTest


class TestAutoMoving(BaseTest):
    organise = True

    def teardown(self):
        super(TestAutoMoving, self).teardown()
        shutil.rmtree(self.organised)
        os.mkdir(self.organised)

    def test_using_organise_uses_the_specified_organise_folder(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        organise_dir = os.path.join('/', *path.split('/')[:-3])
        assert_equal(self.organised, organise_dir)

    def test_using_organise_uses_the_correct_show_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        season_dir = path.split('/')[-3:][0]
        assert_equal(season_dir, self._file.show_name)

    def test_using_organise_uses_the_correct_season_folder_in_the_path(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        season_dir = path.split('/')[-2:][0]
        assert_equal(season_dir, 'Season {0}'.format(self._file.season))

    def test_using_organise_uses_the_correct_filename(self):
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        filename = path.split('/')[-1:][0].split(' - ')[-1:][0]
        assert_equal(filename, '.'.join([self._file.episodes[0].title, self._file.extension]))

    def test_moving_the_leading_the_to_the_end_of_a_show_name_causes_the_show_folder_name_to_follow_suit_when_using_organise(self):
        show_name = 'Big Bang Theory, The'
        self._file.show_name = show_name
        path = self.tv.build_path(self._file, organise=self.organise, rename_dir=self.organised)
        show_dir = path.split('/')[-3:][0]
        assert_equal(show_dir, show_name)
|
Python
| 0.000003 |
@@ -1385,17 +1385,16 @@
ename, '
-.
'.join(%5B
|
98c658822cf6782ca0907ab7a68691922e701aa6
|
Add unittest for pytesseract
|
tests/test_pytesseract.py
|
tests/test_pytesseract.py
|
Python
| 0.000001 |
@@ -0,0 +1,560 @@
+import unittest%0Aimport io%0Aimport pytesseract%0Aimport numpy as np%0Afrom PIL import Image as PILImage%0Afrom wand.image import Image as wandimage%0A%0Aclass TestPytesseract(unittest.TestCase):%0A def test_tesseract(self):%0A # Open pdf with Wand%0A with wandimage(filename='/input/tests/data/test.pdf') as wand_image:%0A img_buffer = np.asarray(bytearray(wand_image.make_blob(format='png')), dtype='uint8')%0A bytesio = io.BytesIO(img_buffer)%0A test_string = pytesseract.image_to_string(PILImage.open(bytesio))%0A self.assertIsInstance(test_string, str)%0A
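The call under test reduces to handing pytesseract a PIL image; a minimal standalone sketch (the image path is hypothetical):

    import pytesseract
    from PIL import Image

    # image_to_string runs Tesseract OCR over any PIL-compatible image.
    print(pytesseract.image_to_string(Image.open('sample.png')))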
|
|
f8d8580dfffee35236478ec75116b291499c085c
|
Create maximum-average-subarray-i.py
|
Python/maximum-average-subarray-i.py
|
Python/maximum-average-subarray-i.py
|
Python
| 0.999246 |
@@ -0,0 +1,852 @@
+# Time: O(n)%0A# Space: O(1)%0A%0A# Given an array consisting of n integers,%0A# find the contiguous subarray of given length k that has the maximum average value.%0A# And you need to output the maximum average value.%0A#%0A# Example 1:%0A# Input: %5B1,12,-5,-6,50,3%5D, k = 4%0A# Output: 12.75%0A# Explanation: Maximum average is (12-5-6+50)/4 = 51/4 = 12.75%0A# Note:%0A# 1 %3C= k %3C= n %3C= 30,000.%0A# Elements of the given array will be in the range %5B-10,000, 10,000%5D.%0A%0Aclass Solution(object):%0A def findMaxAverage(self, nums, k):%0A %22%22%22%0A :type nums: List%5Bint%5D%0A :type k: int%0A :rtype: float%0A %22%22%22%0A total = 0%0A for i in xrange(k):%0A total += nums%5Bi%5D%0A result = total%0A for i in xrange(k, len(nums)):%0A total += nums%5Bi%5D - nums%5Bi-k%5D%0A result = max(result, total)%0A return float(result) / k%0A
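A quick sanity check against the example in the header comment:

    # Expected output: 12.75, i.e. (12 - 5 - 6 + 50) / 4.0
    print(Solution().findMaxAverage([1, 12, -5, -6, 50, 3], 4))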
|
|
164ccb9206885b216e724b3618ebae5601ab0ac0
|
Add parallel execution utility module.
|
parallel.py
|
parallel.py
|
Python
| 0 |
@@ -0,0 +1,199 @@
+import multiprocessing as mp%0A%0A%0Adef run_func(func, args, parallel=False):%0A if parallel:%0A # Close and join the pool so worker processes do not leak.%0A pool = mp.Pool(mp.cpu_count() - 1)%0A try:%0A pool.map(func, args)%0A finally:%0A pool.close()%0A pool.join()%0A else:%0A for arg in args:%0A func(arg)%0A
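A hypothetical call site for the utility above (square is illustrative; with parallel=True the function must be picklable, i.e. defined at module top level):

    def square(x):
        return x * x

    run_func(square, [1, 2, 3, 4])                 # serial
    run_func(square, [1, 2, 3, 4], parallel=True)  # cpu_count() - 1 workers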
|