commit (stringlengths, 40–40) | subject (stringlengths, 1–3.25k) | old_file (stringlengths, 4–311) | new_file (stringlengths, 4–311) | old_contents (stringlengths, 0–26.3k) | lang (stringclasses, 3 values) | proba (float64, 0–1) | diff (stringlengths, 0–7.82k)
---|---|---|---|---|---|---|---
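The `old_contents` cells below hold raw file text, while the `diff` cells (and new-file contents) are percent-encoded: `%0A` is a newline, `%22` a double quote, `%7B`/`%7D` braces, and so on. A minimal sketch for turning an encoded cell back into plain text, assuming standard URL percent-encoding (the helper name is illustrative):

```python
from urllib.parse import unquote

def decode_cell(cell: str) -> str:
    """Decode a percent-encoded dataset cell (e.g. a diff) into plain text."""
    # unquote maps %0A -> "\n", %22 -> '"', %7B -> "{", etc., and leaves
    # literal "+" diff markers untouched (unlike unquote_plus would).
    return unquote(cell)

# Hypothetical usage on the second row's diff payload:
print(decode_cell("+import argparse%0A%0A%0A"))
```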
a3b2b50209908fba83997bdc6d75dde222636489
|
Add test for double close
|
tests/test_sa_connection.py
|
tests/test_sa_connection.py
|
import asyncio
from aiopg import connect, sa, Cursor
import unittest
import sqlalchemy
from sqlalchemy import MetaData, Table, Column, Integer, String, Sequence
meta = MetaData()
tbl = Table('sa_tbl', meta,
            # FetchedValue()
Column('id', Integer, Sequence('sa_tbl_id_seq'), nullable=False,
primary_key=True),
Column('name', String(255)))
class TestSAConnection(unittest.TestCase):
def setUp(self):
self.loop = asyncio.new_event_loop()
asyncio.set_event_loop(None)
def tearDown(self):
self.loop.close()
@asyncio.coroutine
def connect(self, **kwargs):
conn = yield from connect(database='aiopg',
user='aiopg',
password='passwd',
host='127.0.0.1',
loop=self.loop,
**kwargs)
cur = yield from conn.cursor()
yield from cur.execute("DROP TABLE IF EXISTS sa_tbl")
yield from cur.execute("CREATE TABLE sa_tbl "
"(id serial, name varchar(255))")
yield from cur.execute("INSERT INTO sa_tbl (name)"
"VALUES ('first')")
cur.close()
return sa.SAConnection(conn, sa.dialect)
def test_execute_text_select(self):
@asyncio.coroutine
def go():
conn = yield from self.connect()
res = yield from conn.execute("SELECT * FROM sa_tbl;")
self.assertIsInstance(res.cursor, Cursor)
self.assertEqual(('id', 'name'), res.keys())
rows = [r for r in res]
self.assertTrue(res.closed)
self.assertIsNone(res.cursor)
self.assertEqual(1, len(rows))
row = rows[0]
self.assertEqual(1, row[0])
self.assertEqual(1, row['id'])
self.assertEqual(1, row.id)
self.assertEqual('first', row[1])
self.assertEqual('first', row['name'])
self.assertEqual('first', row.name)
self.loop.run_until_complete(go())
def test_execute_sa_select(self):
@asyncio.coroutine
def go():
conn = yield from self.connect()
res = yield from conn.execute(tbl.select())
self.assertIsInstance(res.cursor, Cursor)
self.assertEqual(('id', 'name'), res.keys())
rows = [r for r in res]
self.assertTrue(res.closed)
self.assertIsNone(res.cursor)
self.assertTrue(res.returns_rows)
self.assertEqual(1, len(rows))
row = rows[0]
self.assertEqual(1, row[0])
self.assertEqual(1, row['id'])
self.assertEqual(1, row.id)
self.assertEqual('first', row[1])
self.assertEqual('first', row['name'])
self.assertEqual('first', row.name)
self.loop.run_until_complete(go())
def test_scalar(self):
@asyncio.coroutine
def go():
conn = yield from self.connect()
res = yield from conn.scalar(tbl.count())
self.assertEqual(1, res)
self.loop.run_until_complete(go())
def test_row_proxy(self):
@asyncio.coroutine
def go():
conn = yield from self.connect()
res = yield from conn.execute(tbl.select())
rows = [r for r in res]
row = rows[0]
self.assertEqual(2, len(row))
self.assertEqual(['id', 'name'], list(row))
self.assertIn('id', row)
self.assertNotIn('unknown', row)
self.assertEqual('first', row.name)
with self.assertRaises(AttributeError):
row.unknown
self.loop.run_until_complete(go())
def test_insert(self):
@asyncio.coroutine
def go():
conn = yield from self.connect()
res = yield from conn.execute(tbl.insert().values(name='second'))
self.assertEqual(('id',), res.keys())
self.assertEqual(1, res.rowcount)
self.assertTrue(res.returns_rows)
rows = [r for r in res]
self.assertEqual(1, len(rows))
self.assertEqual(2, rows[0].id)
self.loop.run_until_complete(go())
def test_delete(self):
@asyncio.coroutine
def go():
conn = yield from self.connect()
            res = yield from conn.execute(tbl.delete().where(tbl.c.id == 1))
self.assertEqual((), res.keys())
self.assertEqual(1, res.rowcount)
self.assertFalse(res.returns_rows)
self.assertTrue(res.closed)
self.assertIsNone(res.cursor)
self.loop.run_until_complete(go())
|
Python
| 0 |
@@ -4781,28 +4781,462 @@
op.run_until_complete(go())%0A
+%0A def test_double_close(self):%0A @asyncio.coroutine%0A def go():%0A conn = yield from self.connect()%0A res = yield from conn.execute(%22SELECT 1%22)%0A res.close()%0A self.assertTrue(res.closed)%0A self.assertIsNone(res.cursor)%0A res.close()%0A self.assertTrue(res.closed)%0A self.assertIsNone(res.cursor)%0A%0A self.loop.run_until_complete(go())%0A
|
70ad81a24e218fd2b5fed03224611eae63e0d58f
|
add main argument processing file
|
boxes/argsParse.py
|
boxes/argsParse.py
|
Python
| 0 |
@@ -0,0 +1,18 @@
+import argparse%0A%0A%0A
|
|
a633cc0b4ee376ff02af101154e60b8b33dfda08
|
add migration for old logs
|
scripts/migrate_preprint_logs.py
|
scripts/migrate_preprint_logs.py
|
Python
| 0 |
@@ -0,0 +1,1754 @@
+import sys%0Aimport logging%0Afrom datetime import datetime%0A%0Afrom modularodm import Q%0Afrom modularodm.exceptions import NoResultsFound%0A%0Afrom website.app import init_app%0Afrom website.models import NodeLog, PreprintService%0A%0Alogger = logging.getLogger(__name__)%0A%0A%0Adef main(dry):%0A if dry:%0A logging.warn('DRY mode running')%0A now = datetime.utcnow()%0A initiated_logs = NodeLog.find(Q('action', 'eq', NodeLog.PREPRINT_INITIATED) & Q('date', 'lt', now))%0A for log in initiated_logs:%0A try:%0A preprint = PreprintService.find_one(Q('node', 'eq', log.node))%0A log.params.update(%7B%0A 'preprint': %7B%0A 'id': preprint._id%0A %7D,%0A 'service': %7B%0A 'name': preprint.provider.name%0A %7D%0A %7D)%0A logging.info('Updating log %7B%7D from node %7B%7D, with preprint id: %7B%7D'.format(log._id, log.node.title, preprint._id))%0A if not dry:%0A log.save()%0A except NoResultsFound:%0A pass%0A%0A updated_logs = NodeLog.find(Q('action', 'eq', NodeLog.PREPRINT_FILE_UPDATED) & Q('date', 'lt', now))%0A for log in updated_logs:%0A try:%0A preprint = PreprintService.find_one(Q('node', 'eq', log.node))%0A log.params.update(%7B%0A 'preprint': %7B%0A 'id': preprint._id%0A %7D%0A %7D)%0A logging.info('Updating log %7B%7D from node %7B%7D, with preprint id: %7B%7D'.format(log._id, log.node.title, preprint._id))%0A if not dry:%0A log.save()%0A except NoResultsFound:%0A pass%0A%0Aif __name__ == '__main__':%0A init_app(routes=False) # Sets the storage backends on all models%0A dry = 'dry' in sys.argv%0A main(dry)%0A
|
|
caf2d7108d7329da562a012775bac0a87d4c62b6
|
Create db_create.py
|
fade/db_create.py
|
fade/db_create.py
|
Python
| 0.000011 |
@@ -0,0 +1,587 @@
+#!flask/bin/python%0A%22%22%22%0A See LICENSE.txt file for copyright and license details.%0A%22%22%22%0Afrom migrate.versioning import api%0Afrom config import SQLALCHEMY_DATABASE_URI%0Afrom config import SQLALCHEMY_MIGRATE_REPO%0Afrom app import db%0Aimport os.path%0A%0A%0Adb.create_all()%0Aif not os.path.exists(SQLALCHEMY_MIGRATE_REPO):%0A api.create(SQLALCHEMY_MIGRATE_REPO, 'database repository')%0A api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO)%0Aelse:%0A api.version_control(SQLALCHEMY_DATABASE_URI, SQLALCHEMY_MIGRATE_REPO,%0A api.version(SQLALCHEMY_MIGRATE_REPO))%0A
|
|
f6f75172b1b8a41fc5ae025416ea665258d4ff4c
|
Add script for updating favicon from gh avatar
|
favicon-update.py
|
favicon-update.py
|
Python
| 0 |
@@ -0,0 +1,787 @@
+from PIL import Image%0Aimport requests%0Afrom io import BytesIO%0A%0A# This whole script was done using Google and StackOverflow%0A# How to generate ico files%0A# https://stackoverflow.com/a/36168447/1697953%0A# How to get GitHub avatar location from username%0A# https://stackoverflow.com/a/36380674/1697953%0A# How to read image data from URL%0A# https://stackoverflow.com/a/23489503/1697953%0A# How to follow redirects in requests%0A# https://stackoverflow.com/a/50606372/1697953%0A%0AavatarUrl = 'https://github.com/sorashi.png'%0A%0Aif __name__ == %22__main__%22:%0A r = requests.head(avatarUrl, allow_redirects=True)%0A print('Avatar located at ' + r.url)%0A response = requests.get(r.url)%0A img = Image.open(BytesIO(response.content))%0A img.save('favicon.ico', sizes=%5B(16, 16), (32, 32), (48, 48), (64, 64)%5D)
|
|
75031595de8726dcd21535b13385c4e6c89aa190
|
Add run meter task
|
datastore/tasks.py
|
datastore/tasks.py
|
Python
| 0.999872 |
@@ -0,0 +1,225 @@
+from __future__ import absolute_import%0A%0Afrom celery import shared_task%0A%0Afrom datastore.models import Project%0A%0A%0A@shared_task%0Adef run_meter(project_pk):%0A project = Project.objects.get(pk=project_pk):%0A project.run_meter()%0A
|
|
27c5a09ddbe2ddf14b2f4c84ebb668adbdfd7070
|
ADD example.basicserver for test
|
example/basicserver.py
|
example/basicserver.py
|
Python
| 0 |
@@ -0,0 +1,265 @@
+%0A%0Afrom wood import Wood%0A%0Aw = Wood(__name__,debug=True)%0A%0AIndexHandler = w.empty(uri='/',name='IndexHandler')%0A%[email protected]%0Adef index_page(self):%0A self.write('%E6%BB%91%E7%A8%BD%EF%BC%8C%E8%BF%99%E9%87%8C%E4%BB%80%E4%B9%88%E9%83%BD%E6%B2%A1%E6%9C%89%5Cn(HuajiEnv)')%0A%0Aif __name__ == '__main__':%0A w.start(port=6000)%0A %0A %0A %0A %0A
|
|
8049e2f0bb0a12bb301ab4390c3e4da3d90f0369
|
Move stagingsettings to new 'cosmos' project tree
|
cosmos/platform/frontend/src/bdp_fe/conf/stagingsettings.py
|
cosmos/platform/frontend/src/bdp_fe/conf/stagingsettings.py
|
Python
| 0 |
@@ -0,0 +1,1663 @@
+%22%22%22%0AModule testsettings%0A%0AThese settings allow Django unittests to setup a temporary databse and run the%0Atests of the installed applications.%0A%0A%22%22%22%0A%0ADEBUG = True%0ATEMPLATE_DEBUG = DEBUG%0A%0Afrom bdp_fe.conf.base_settings import *%0A%0ADATABASES = %7B%0A 'default': %7B%0A 'ENGINE': 'django.db.backends.sqlite3',%0A 'NAME': '/tmp/bdp_fe.db'%0A %7D%0A%7D%0A%0AMEDIA_ROOT = ''%0AMEDIA_URL = ''%0ASTATIC_ROOT = ''%0ASTATIC_URL = '/static/'%0AADMIN_MEDIA_PREFIX = '/static/admin/'%0A%0AADMINS = (%0A ('admin', '[email protected]'),%0A)%0AMANAGERS = ADMINS%0A%0ALANDING_ROOT = '/tmp/landing/'%0A%0ACLUSTER_CONF = %7B%0A 'host': 'localhost',%0A 'port': 9888,%0A 'mongobase': 'mongodb://pshdp04',%0A%7D%0A%0ALOGGING = %7B%0A 'version': 1,%0A 'disable_existing_loggers': False,%0A 'handlers': %7B%0A 'mail_admins': %7B%0A 'level': 'ERROR',%0A 'class': 'django.utils.log.AdminEmailHandler'%0A %7D,%0A 'console': %7B%0A 'level': 'DEBUG',%0A 'class': 'logging.StreamHandler',%0A 'formatter': 'simple'%0A %7D,%0A 'null': %7B%0A 'level': 'DEBUG',%0A 'class': 'django.utils.log.NullHandler',%0A %7D%0A %7D,%0A 'formatters': %7B%0A 'verbose': %7B%0A 'format': '%25(levelname)s %25(asctime)s %25(module)s %25(message)s'%0A %7D,%0A 'simple': %7B%0A 'format': '%25(levelname)s %25(message)s'%0A %7D,%0A %7D,%0A 'loggers': %7B%0A 'django.request': %7B%0A 'handlers': %5B'console', 'mail_admins'%5D,%0A 'level': 'ERROR',%0A 'propagate': True,%0A %7D,%0A 'bdp_fe': %7B%0A 'handlers': %5B'console', 'mail_admins'%5D,%0A 'level': 'DEBUG',%0A 'propagate': True,%0A %7D%0A %7D%0A%7D%0A
|
|
70d912bfb1ccec03edfe92b9b2c87610346c8f42
|
Add blocking migration for new domain db
|
corehq/doctypemigrations/migrations/0006_domain_migration_20151118.py
|
corehq/doctypemigrations/migrations/0006_domain_migration_20151118.py
|
Python
| 0 |
@@ -0,0 +1,476 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0Afrom corehq.doctypemigrations.djangomigrations import assert_initial_complete%0Afrom corehq.doctypemigrations.migrator_instances import domains_migration%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('doctypemigrations', '0005_auto_20151013_0819'),%0A %5D%0A%0A operations = %5B%0A migrations.RunPython(assert_initial_complete(domains_migration))%0A %5D%0A
|
|
0378f8cde69a18d954341b861a724592ef7a5949
|
Extend RANSAC example with comparison to BaggingRegressor
|
examples/linear_model/plot_ransac.py
|
examples/linear_model/plot_ransac.py
|
"""
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model
# Set random seed for both equal data noise and equal random sample selection
np.random.seed(seed=1)
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add faulty data
faulty = np.array(30 * [(180, -100)], dtype=np.double)
faulty += 5 * np.random.normal(size=faulty.shape)
data[:faulty.shape[0]] = faulty
# Add Gaussian noise to coordinates
noise = np.random.normal(size=data.shape)
data += 0.5 * noise
data[::2] += 5 * noise[::2]
data[::4] += 20 * noise[::4]
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_robust = linear_model.RANSAC(linear_model.LinearRegression())
model_robust.fit(X, y)
inlier_mask = model_robust.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Generate coordinates of estimated models
line_X = np.arange(-250, 250)
line_y = model.predict(line_X[:, np.newaxis])
line_y_robust = model_robust.predict(line_X[:, np.newaxis])
plt.plot(data[inlier_mask, 0], data[inlier_mask, 1], '.g',
label='Inlier data')
plt.plot(data[outlier_mask, 0], data[outlier_mask, 1], '.r',
label='Outlier data')
plt.plot(line_X, line_y, '-k', label='Linear model from all data')
plt.plot(line_X, line_y_robust, '-b', label='Robustly fitted linear model')
plt.legend(loc='lower left')
plt.show()
|
Python
| 0 |
@@ -325,16 +325,26 @@
ar_model
+, ensemble
%0A%0A%0A# Set
@@ -1043,21 +1043,21 @@
%0Amodel_r
-obust
+ansac
= linea
@@ -1111,21 +1111,21 @@
%0Amodel_r
-obust
+ansac
.fit(X,
@@ -1148,21 +1148,21 @@
model_r
-obust
+ansac
.inlier_
@@ -1211,16 +1211,171 @@
_mask)%0A%0A
+# Robustly fit linear model with bagged linear regressor%0Amodel_bagged = ensemble.BaggingRegressor(linear_model.LinearRegression())%0Amodel_bagged.fit(X, y)%0A%0A
# Genera
@@ -1493,21 +1493,21 @@
line_y_r
-obust
+ansac
= model
@@ -1508,21 +1508,81 @@
model_r
-obust
+ansac.predict(line_X%5B:, np.newaxis%5D)%0Aline_y_bagged = model_bagged
.predict
@@ -1685,19 +1685,22 @@
el='
-Inlier data
+RANSAC inliers
')%0Ap
@@ -1779,20 +1779,23 @@
el='
-O
+RANSAC o
utlier
- data
+s
')%0Ap
@@ -1842,27 +1842,17 @@
ear
-model from all data
+regressor
')%0Ap
@@ -1875,21 +1875,21 @@
line_y_r
-obust
+ansac
, '-b',
@@ -1900,35 +1900,88 @@
l='R
-obustly fitted linear model
+ANSAC regressor')%0Aplt.plot(line_X, line_y_bagged, '-y', label='Bagging regressor
')%0Ap
|
d0e5ea752912b10e473b2a05da9196800eb6ca86
|
Add an example for the RedisLock
|
examples/redis_lock.py
|
examples/redis_lock.py
|
Python
| 0.000003 |
@@ -0,0 +1,1219 @@
+import random%0A%0Afrom diesel import fork, quickstop, quickstart, sleep%0Afrom diesel.protocols.redis import RedisClient, RedisTransactionError, RedisLock, LockNotAcquired%0A%0A%0A%22%22%22Implement the Redis INCR command using a lock. Obviously this is inefficient, but it's a good%0Aexample of how to use the RedisLock class%22%22%22%0A%0Akey = 'test-lock-key'%0Aincr_key = 'test-incr-key'%0Acounter = 0%0A%0A%0A%22%22%22If sleep_factor %3E lock_timeout you are exercising the timeout loop, otherwise, that loop should be a noop%22%22%22%0Alock_timeout = 3%0Asleep_factor = 1%0A%0A%0A%0Adef take_lock():%0A global counter%0A client = RedisClient('localhost', 6379)%0A try:%0A with RedisLock(client, key, timeout=lock_timeout) as lock:%0A v = client.get(incr_key)%0A sleep(random.random() * sleep_factor)%0A client.set(incr_key, int(v) + 1)%0A counter += 1%0A except LockNotAcquired:%0A pass%0A%0Adef main():%0A client = RedisClient('localhost', 6379)%0A client.delete(key)%0A client.set(incr_key, 0)%0A%0A for _ in xrange(500):%0A fork(take_lock)%0A if random.random() %3E 0.1:%0A sleep(random.random() / 10)%0A sleep(2)%0A assert counter == int(client.get(incr_key)), 'Incr failed!'%0A quickstop()%0A%0A%0Aquickstart(main)%0A
|
|
1d77849b048c424ebc042a61c047c2c74e27277f
|
minus 1
|
leetcode_python/zigzag_conversion.py
|
leetcode_python/zigzag_conversion.py
|
Python
| 0.999994 |
@@ -0,0 +1,486 @@
+class Solution:%0D%0A # @return a string%0D%0A def convert(self, s, nRows):%0D%0A if nRows == 1:%0D%0A return s%0D%0A result = %5B%5B%5D for i in range(nRows)%5D%0D%0A for i, c in enumerate(s):%0D%0A if (i / (nRows - 1)) %25 2 == 0:%0D%0A result%5Bi %25 (nRows - 1)%5D.append(c)%0D%0A else:%0D%0A result%5BnRows - 1 - i %25 (nRows - 1)%5D.append(c)%0D%0A result = %5B''.join(row) for row in result%5D%0D%0A result = ''.join(result)%0D%0A return result%0D%0A
|
|
d0b8c68ae3c8acbc3d5dfe13842e3c41a198b978
|
Add script to fix all notions
|
fix_notions_db.py
|
fix_notions_db.py
|
Python
| 0.000001 |
@@ -0,0 +1,176 @@
+from alignements_backend.db import DB%0Afrom alignements_backend.notion import Notion%0A%0Afor notion in DB.scan_iter(match='notion:*'):%0A n = Notion(list(DB.sscan_iter(notion)))%0A%0A
|
|
ad6e0bad22b0c5b0e6f97ceb13694ab804041443
|
Add model resources.
|
tracker/api.py
|
tracker/api.py
|
Python
| 0 |
@@ -0,0 +1,713 @@
+from tastypie.resources import ModelResource%0Afrom tracker.models import Task, WorkSession%0Afrom django.contrib.auth.models import User%0Afrom tastypie import fields%0A%0A%0Aclass UserResource(ModelResource):%0A class Meta:%0A queryset = User.objects.all()%0A resource_name = 'user'%0A%0A%0Aclass TaskResource(ModelResource):%0A user = fields.ForeignKey(UserResource, 'user')%0A %0A class Meta:%0A queryset = Task.objects.all()%0A resource_name = 'task'%0A%0A%0Aclass WorkSessionResource(ModelResource):%0A user = fields.ForeignKey(UserResource, 'user')%0A task = fields.ForeignKey(TaskResource, 'task')%0A %0A class Meta:%0A queryset = WorkSession.objects.all()%0A resource_name = 'work_session'%0A
|
|
c97680113fb25ed43e96c26d02bfd57e15e427b8
|
Add missing migrations
|
nodeconductor/billing/migrations/0004_invoice_usage_pdf.py
|
nodeconductor/billing/migrations/0004_invoice_usage_pdf.py
|
Python
| 0 |
@@ -0,0 +1,472 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('billing', '0003_invoice_status'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='invoice',%0A name='usage_pdf',%0A field=models.FileField(null=True, upload_to=b'invoices_usage', blank=True),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
3d19606b83f6a4a7906f88b15c6e215620394560
|
Implemented the Ford-Fulkerson algorithm
|
ford_fulkerson.py
|
ford_fulkerson.py
|
Python
| 0.998064 |
@@ -0,0 +1,1548 @@
+#!/usr/bin/env python%0A#coding: UTF-8%0A#%0A# Implementation of the Ford-Fulkerson algorithm to solve the maximum flow problem.%0A#%0A# Copyright (c) 2013 Samuel Gro%C3%9F%0A#%0A%0Afrom graph import *%0Afrom basics import depth_first_search%0A%0A%0Adef solve_max_flow_ff(graph, s, t):%0A %22%22%22%0A Solves the maximum flow prolem using the ford-fulkerson algorithm for the given %0A graph and source/target node.%0A %22%22%22%0A%0A while True:%0A path = depth_first_search(graph, s, t)%0A if path is None:%0A break%0A%0A # find maximum capacity on the current path%0A min_capacity = None%0A for edge in path.edges():%0A if min_capacity is None or edge.capacity %3C min_capacity:%0A min_capacity = edge.capacity%0A%0A # subtract min_capacity from all edges and add return edge%0A for edge in path.edges():%0A edge.capacity -= min_capacity%0A if not graph.has_reverse_edge(edge):%0A graph.add_edge(edge.destination(), edge.source(), %7B%22capacity%22 : min_capacity, %22tmp%22 : True%7D)%0A else: %0A graph.get_reverse_edge(edge).capacity += min_capacity%0A if edge.capacity == 0:%0A graph.remove_edge(edge)%0A%0A # reverse edges and cleanup%0A for edge in graph.edges():%0A if hasattr(edge, %22tmp%22):%0A if graph.has_reverse_edge(edge):%0A graph.get_reverse_edge(edge).load = edge.capacity%0A else:%0A graph.add_edge(edge.destination(), edge.source(), %7B%22load%22 : edge.capacity%7D)%0A graph.remove_edge(edge)%0A
|
|
321463a5d7f102431ed286d57d1a8fa8c576cca7
|
add plotting fns
|
terrapin/plot.py
|
terrapin/plot.py
|
Python
| 0.000001 |
@@ -0,0 +1,67 @@
+import matplotlib.pyplot as plt%0A%0A%0Adef flow_grid(dem, angles):%0A%09pass
|
|
8827eb9dbab6ca325843da4ec8da2aaa5af87bce
|
Revert D6097465: Move .buckd/tmp/ to buck-out/tmp/buckd/.
|
programs/buck_project.py
|
programs/buck_project.py
|
from __future__ import print_function
import errno
import os
import tempfile
import textwrap
import shutil
import sys
import file_locks
from tracing import Tracing
import hashlib
def get_file_contents_if_exists(path, default=None):
with Tracing('BuckProject.get_file_contents_if_it_exists', args={'path': path}):
if not os.path.exists(path):
return default
with open(path) as f:
contents = f.read().strip()
return default if not contents else contents
def write_contents_to_file(path, contents):
with Tracing('BuckProject.write_contents_to_file', args={'path': path}):
with open(path, 'w') as output_file:
output_file.write(str(contents))
def makedirs(path):
try:
os.makedirs(path)
    except OSError as e:
# Potentially the case that multiple processes are running in parallel
# (e.g. a series of linters running buck query without buckd), so we
# should just swallow the error.
# This is mostly equivalent to os.makedirs(path, exist_ok=True) in
# Python 3.
if e.errno != errno.EEXIST and os.path.isdir(path):
raise
class BuckProject:
def __init__(self, root):
self.root = root
self._buck_out = os.path.join(root, "buck-out")
self._buck_out_tmp = os.path.join(self._buck_out, "tmp")
makedirs(self._buck_out_tmp)
self._buck_out_log = os.path.join(self._buck_out, "log")
makedirs(self._buck_out_log)
self.tmp_dir = tempfile.mkdtemp(prefix="buck_run.", dir=self._buck_out_tmp)
# Only created if buckd is used.
self.buckd_tmp_dir = None
self.buckd_dir = os.path.join(root, ".buckd")
self.buckd_version_file = os.path.join(self.buckd_dir, "buckd.version")
buck_javaargs_path = os.path.join(self.root, ".buckjavaargs")
self.buck_javaargs = get_file_contents_if_exists(buck_javaargs_path)
buck_javaargs_path_local = os.path.join(
self.root, ".buckjavaargs.local")
self.buck_javaargs_local = get_file_contents_if_exists(
buck_javaargs_path_local)
def get_root_hash(self):
return hashlib.sha256(self.root).hexdigest()
def get_buckd_transport_file_path(self):
if os.name == 'nt':
return ur'\\.\pipe\buckd_{0}'.format(self.get_root_hash())
else:
return os.path.join(self.buckd_dir, 'sock')
def get_buckd_transport_address(self):
if os.name == 'nt':
return 'local:buckd_{0}'.format(self.get_root_hash())
else:
return 'local:.buckd/sock'
def get_running_buckd_version(self):
return get_file_contents_if_exists(self.buckd_version_file)
def get_buck_out_log_dir(self):
return self._buck_out_log
def clean_up_buckd(self):
with Tracing('BuckProject.clean_up_buckd'):
if os.path.exists(self.buckd_dir):
file_locks.rmtree_if_can_lock(self.buckd_dir)
def create_buckd_tmp_dir(self):
if self.buckd_tmp_dir is not None:
return self.buckd_tmp_dir
makedirs(self.buckd_dir)
tmp_dir_parent = os.path.join(self._buck_out_tmp, "buckd")
makedirs(tmp_dir_parent)
self.buckd_tmp_dir = tempfile.mkdtemp(prefix="buck_run.",
dir=tmp_dir_parent)
return self.buckd_tmp_dir
def save_buckd_version(self, version):
write_contents_to_file(self.buckd_version_file, version)
@staticmethod
def from_current_dir():
with Tracing('BuckProject.from_current_dir'):
current_dir = os.getcwd()
if '--version' in sys.argv or '-V' in sys.argv:
return BuckProject(current_dir)
at_root_dir = False
while not at_root_dir:
if os.path.exists(os.path.join(current_dir, ".buckconfig")):
return BuckProject(current_dir)
parent_dir = os.path.dirname(current_dir)
at_root_dir = current_dir == parent_dir
current_dir = parent_dir
raise NoBuckConfigFoundException()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
with Tracing('BuckProject.__exit__'):
if os.path.exists(self.tmp_dir):
try:
shutil.rmtree(self.tmp_dir)
except OSError as e:
if e.errno != errno.ENOENT:
raise
class NoBuckConfigFoundException(Exception):
def __init__(self):
no_buckconfig_message_path = ".no_buckconfig_message"
default_message = textwrap.dedent("""\
This does not appear to be the root of a Buck project. Please 'cd'
to the root of your project before running buck. If this really is
the root of your project, run
'touch .buckconfig'
and then re-run your buck command.""")
message = get_file_contents_if_exists(no_buckconfig_message_path, default_message)
Exception.__init__(self, message)
|
Python
| 0.000026 |
@@ -1300,38 +1300,32 @@
k-out%22)%0A
-self._
buck_out_tmp = o
@@ -1368,38 +1368,32 @@
makedirs(
-self._
buck_out_tmp)%0A
@@ -1556,22 +1556,16 @@
.%22, dir=
-self._
buck_out
@@ -3111,41 +3111,8 @@
dir%0A
- makedirs(self.buckd_dir)%0A
@@ -3154,29 +3154,23 @@
elf.
-_
buck
-_out_tmp, %22buckd
+d_dir, %22tmp
%22)%0A
|
ed23fb301503d331af243a37d1b0a934d5d2f21c
|
add laser plugin object
|
mythril/laser/ethereum/plugins/plugin.py
|
mythril/laser/ethereum/plugins/plugin.py
|
Python
| 0 |
@@ -0,0 +1,910 @@
+from mythril.laser.ethereum.svm import LaserEVM%0A%0A%0Aclass LaserPlugin:%0A %22%22%22 Base class for laser plugins%0A%0A Functionality in laser that the symbolic execution process does not need to depend on%0A can be implemented in the form of a laser plugin.%0A%0A Laser plugins implement the function initialize(symbolic_vm) which is called with the laser virtual machine%0A when they are loaded.%0A Regularly a plugin will introduce several hooks into laser in this function%0A%0A Plugins can direct actions by raising Signals defined in mythril.laser.ethereum.plugins.signals%0A For example, a pruning plugin might raise the PluginSkipWorldState signal.%0A %22%22%22%0A%0A def initialize(self, symbolic_vm: LaserEVM):%0A %22%22%22 Initializes this plugin on the symbolic virtual machine%0A%0A :param symbolic_vm: symbolic virtual machine to initialize the laser plugin on%0A %22%22%22%0A raise NotImplementedError%0A
|
|
550469032843eb2af3b4a9faaed34d9754f00700
|
Add command to test managers emails
|
geotrek/common/management/commands/test_managers_emails.py
|
geotrek/common/management/commands/test_managers_emails.py
|
Python
| 0.000002 |
@@ -0,0 +1,426 @@
+from django.core.mail import mail_managers%0Afrom django.core.management.base import BaseCommand%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Test if email settings are OK by sending mail to site managers%22%0A%0A def execute(self, *args, **options):%0A%0A subject = u'Test email for managers'%0A message = u'If you receive this email, it seems that conf is OK !'%0A%0A mail_managers(subject, message, fail_silently=False)%0A
|
|
edb9500824faffd9f1d0d1b59ca29966e3b18282
|
Customize behave formatter to output json
|
modules/formatter_record.py
|
modules/formatter_record.py
|
Python
| 0.000001 |
@@ -0,0 +1,3401 @@
+from behave.formatter.json import PrettyJSONFormatter%0Afrom pprint import pprint%0A%0Aclass RecordFormatter(PrettyJSONFormatter):%0A name = %22super%22%0A description = %22Formatter for adding REST calls to JSON output.%22%0A jsteps = %7B%7D # Contains an array of features, that contains array of steps in each feature%0A%0A # Overriding Background Function. This runs evertime a Background is ran.%0A # This step%0A def background(self, background):%0A # Let the parent run first%0A super(RecordFormatter, self).background(background)%0A # Check if the current feature has a name - Could be removed%0A if (self.isnotBackground()):%0A # Remove all the background steps from our jsteps, as they are not required%0A for step in background.steps:%0A self.jsteps%5Bself.current_feature_element.name%5D.pop(0)%0A%0A # Overriding Step feature. This is called everytime a step is found in feature file. This happens before the feature/scenario are executed.%0A def step(self, step):%0A # Let the parent run first%0A super(RecordFormatter, self).step(step)%0A # Check if the current feature has a name - Could be removed%0A if (self.isnotBackground()):%0A # Append the step into our own collection of jsteps.%0A self.jsteps%5Bself.current_feature_element%5B'name'%5D%5D.append(step);%0A%0A # Overriding End of Feature. This is ran once the entire feature has completed running%0A def eof(self):%0A # Iterate through each scenarios%0A for scenario in self.current_feature_data%5B'elements'%5D:%0A # Check if Scenario valid%0A if (scenario%5B'name'%5D != ''):%0A steps = scenario%5B'steps'%5D%0A jscenariosteps = self.jsteps%5Bscenario%5B'name'%5D%5D%0A status = %22passed%22 # Add Scenario status%0A # Iterate through the jstep, and step results%0A for (j, jstep) in enumerate(jscenariosteps):%0A # Check if any of the above status failed, if so, mark the status as failed%0A if ('result' in steps%5Bj%5D):%0A if steps%5Bj%5D%5B'result'%5D%5B'status'%5D == 'failed':%0A status = 'failed'%0A # Add configurations in scenario level. generally used for sdk_language and sdk_version%0A if (hasattr(jstep, %22details%22)):%0A scenario%5B'details'%5D = jstep.details%0A if (hasattr(jstep, %22date%22)):%0A steps%5Bj%5D%5B'date'%5D = jstep.date%0A # Check if jstep has attribute calls, where our custom data is stored - Could be generalized further%0A if (hasattr(jstep, %22calls%22) and 'result' in steps%5Bj%5D):%0A # add the calls to our step object, that would be later added to json output.%0A steps%5Bj%5D%5B'result'%5D%5B'calls'%5D = jstep.calls%0A # Add feature name and Status as a part of scenario%0A scenario%5B'feature'%5D = self.current_feature.name%0A scenario%5B'status'%5D = status%0A # Let the parent run last here%0A super(RecordFormatter, self).eof()%0A%0A def isnotBackground(self):%0A if(self.current_feature_element%5B'name'%5D != ''):%0A if(self.current_feature_element%5B'name'%5D not in self.jsteps):%0A self.jsteps%5Bself.current_feature_element%5B'name'%5D%5D = %5B%5D%0A return True%0A return False%0A
|
|
def7e3aeaf3b0cd1a6486c72c68a3baad77ef3e5
|
Create leetcode-50.py
|
python_practice/leetCode/leetcode-50.py
|
python_practice/leetCode/leetcode-50.py
|
Python
| 0.000004 |
@@ -0,0 +1,295 @@
+class Solution:%0A def myPow(self, x: 'float', n: 'int') -%3E 'float':%0A return x**n%0A%0A def myPow2(self, x: 'float', n: 'int') -%3E 'float':%0A if n == 0:%0A return 1%0A if n %3C 0:%0A n = 0-n%0A x = 1/x%0A %0A return x**(n%252)*myPow2(x*x, n//2)%0A
|
|
71e431a5eccc6483847888fb0f8f5f30f182913a
|
add a script to convert xml documentation into json
|
doc/xmldoc2json.py
|
doc/xmldoc2json.py
|
Python
| 0.000003 |
@@ -0,0 +1,2475 @@
+#!/usr/bin/python%0Aimport sys%0Aimport xml.etree.ElementTree as ET%0Aimport json%0A%0Adef parseClass(data):%0A dictCls = dict(data.attrib)%0A dictCls%5B'brief_description'%5D = data.find(%22brief_description%22).text.strip()%0A dictCls%5B'description'%5D = data.find(%22description%22).text.strip()%0A dictCls%5B'methods'%5D = %5B%5D%0A for m in data.find(%22methods%22):%0A dictCls%5B'methods'%5D.append(parseMethod(m))%0A dictCls%5B'signals'%5D = %5B%5D%0A for s in (data.find(%22signals%22) if data.find(%22signals%22) is not None else %5B%5D):%0A dictCls%5B'signals'%5D.append(parseMethod(s))%0A dictCls%5B'constants'%5D = %5B%5D%0A for c in (data.find(%22constants%22) if data.find(%22constants%22) is not None else %5B%5D):%0A dictCls%5B'constants'%5D.append(parseConstant(c))%0A dictCls%5B'properties'%5D = %5B%5D%0A for m in (data.find(%22members%22) if data.find(%22members%22) is not None else %5B%5D):%0A dictCls%5B'properties'%5D.append(parseProperty(m))%0A dictCls%5B'theme_properties'%5D = %5B%5D%0A for thi in (data.find(%22theme_items%22) if data.find(%22theme_items%22) is not None else %5B%5D):%0A dictCls%5B'theme_properties'%5D.append(parseProperty(thi))%0A return dictCls%0A%0Adef parseMethod(data):%0A dictMethod = dict(data.attrib)%0A dictMethod%5B'description'%5D = data.find(%22description%22).text.strip()%0A dictMethod%5B'return_type'%5D = data.find(%22return%22).attrib%5B%22type%22%5D if data.find(%22return%22) is not None else %22%22%0A if %22qualifiers%22 not in dictMethod: dictMethod%5B%22qualifiers%22%5D = %22%22%0A dictMethod%5B%22arguments%22%5D = %5B%5D%0A for arg in data.iter('argument'):%0A dictMethod%5B%22arguments%22%5D.append(parseArgument(arg))%0A return dictMethod%0A%0Adef parseArgument(data):%0A dictArg = dict(data.attrib)%0A if %22dictArg%22 in dictArg: dictArg.pop(%22index%22)%0A dictArg%5B%22default_value%22%5D = dictArg%5B%22default%22%5D if %22default%22 in dictArg else %22%22%0A if %22default%22 in dictArg: dictArg.pop(%22default%22)%0A return dictArg%0A%0Adef parseConstant(data):%0A dictConst = dict(data.attrib)%0A dictConst%5B%22description%22%5D = data.text.strip()%0A return dictConst%0A%0Adef parseProperty(data):%0A dictProp = dict(data.attrib)%0A dictProp%5B%22description%22%5D = data.text.strip()%0A return dictProp%0A%0Adef main():%0A if len(sys.argv) %3E=2 :%0A tree = ET.parse(open(sys.argv%5B1%5D, 'r'))%0A classes = %7B%7D%0A for cls in tree.getroot():%0A dictCls = parseClass(cls)%0A classes%5BdictCls%5B'name'%5D%5D = dictCls%0A jsonContent = json.dumps(%7B%22classes%22: classes, %22version%22: %222.1.3%22%7D, ensure_ascii=False, indent=2)%0A print(jsonContent)%0A%0Aif __name__ == '__main__':%0A main()%0A%0A
|
|
4ca8d43d8e6ec243d9812bb313a8e7a21ad781ea
|
Add DB exercise.
|
Exercise/DB.py
|
Exercise/DB.py
|
Python
| 0 |
@@ -0,0 +1,488 @@
+import mysql.connector%0A%0Aconn = mysql.connector.connect(user='root', password='blue', database='test')%0Acursor = conn.cursor()%0A%0Acursor.execute('create table user (id varchar(20) primary key, name varchar(20))')%0Acursor.execute('insert into user (id, name) values (%25s, %25s)', %5B'1', 'Dai'%5D)%0Aprint(cursor.rowcount)%0Aconn.commit()%0Acursor.close()%0Acursor = conn.cursor()%0Acursor.execute('select * from user where id = %25s', ('1',))%0Avalues = cursor.fetchall()%0Aprint(values)%0Acursor.close()%0Aconn.close()%0A
|
|
c4e1e034a3f0be3590dc78c5683d9deaf44d696f
|
add example of escape character
|
scripts/escape/backslash.py
|
scripts/escape/backslash.py
|
Python
| 0.000001 |
@@ -0,0 +1,517 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0Aimport json%0A%0A'''%0AThis tests the acceptance of backslashes%0A%5C%5Cf should be okay%0A%5Cf is not necessarily okay, because json.dumps will not dump this%0A'''%0A%0Aprint json.dumps(%7B%0A %22foogroup%22: %7B%0A %22hosts%22: %5B%0A %22foobar%22%0A %5D%0A %7D,%0A %22_meta%22: %7B%0A %22hostvars%22: %7B%0A %22foobar%22: %7B%0A %22host_specific_var%22: %22ba%5Cfrrr%22,%0A %22from_issue%22: %22H%25%5D~7%5Cf0$ and this... O'Jw%5Cu00188%5Cu0006%5Cb... %22%0A %7D%0A %7D%0A %7D%0A%7D, indent=4)
|
|
bfbd2c792aacd307f8d7ed68ea0f2a7db681431d
|
add functions that generate mask image of the target bin
|
jsk_apc2016_common/python/jsk_apc2016_common/mask_bin.py
|
jsk_apc2016_common/python/jsk_apc2016_common/mask_bin.py
|
Python
| 0.000003 |
@@ -0,0 +1,2023 @@
+#!/usr/bin/env python%0A%0Aimport numpy as np%0Afrom matplotlib.path import Path%0Aimport jsk_apc2016_common.segmentation_helper as helper%0Afrom tf2_geometry_msgs import do_transform_point%0A%0A%0Adef get_mask_img(transform, target_bin, camera_model):%0A %22%22%22%0A :param point: point that is going to be transformed%0A :type point: PointStamped%0A :param transform: camera_frame -%3E bbox_frame%0A :type transform: Transform%0A %22%22%22%0A # check frame_id of a point and transform just in case%0A assert camera_model.tf_frame == transform.header.frame_id%0A assert target_bin.bbox.header.frame_id == transform.child_frame_id%0A%0A transformed_list = %5B%0A do_transform_point(corner, transform)%0A for corner in target_bin.corners%5D%0A projected_points = project_points(transformed_list, camera_model)%0A%0A # generate an polygon that covers the region%0A path = Path(projected_points)%0A x, y = np.meshgrid(%0A np.arange(camera_model.width),%0A np.arange(camera_model.height))%0A x, y = x.flatten(), y.flatten()%0A points = np.vstack((x, y)).T%0A mask_img = path.contains_points(%0A points).reshape(%0A camera_model.height, camera_model.width%0A ).astype('bool')%0A return mask_img%0A%0A%0Adef project_points(points, camera_model):%0A %22%22%22%0A :param points: list of geometry_msgs.msg.PointStamped%0A :type list of stamped points :%0A :param projected_points: list of camera_coordinates%0A :type projected_points: (u, v)%0A%0A The frames of the points and the camera_model are same.%0A %22%22%22%0A # generate mask iamge%0A for point in points:%0A if point.header.frame_id != camera_model.tf_frame:%0A raise ValueError('undefined')%0A if len(points) != 4:%0A raise ValueError('undefined')%0A%0A projected_points = %5B%5D%0A for point in points:%0A projected_points.append(%0A camera_model.project3dToPixel(%0A helper.list_from_point(point.point)%0A )%0A )%0A return projected_points%0A
|
|
852c6639bb0a71b9ef2dd81b2830193d0c9fe23d
|
Create FractalPoke.py
|
FractalPoke.py
|
FractalPoke.py
|
Python
| 0.000001 |
@@ -0,0 +1,2356 @@
+bl_info = %7B%0A %22name%22: %22FractalPoke%22,%0A %22author%22: %22Christopher Kopic%22,%0A %22version%22: (1, 0),%0A %22blender%22: (2, 7, 8),%0A %22location%22: %22%22,%0A %22description%22: %22Iterative Poking inspired by Simon Holmedal's Always Forever%22,%0A %22warning%22: %22%22,%0A %22wiki_url%22: %22%22,%0A %22tracker_url%22: %22%22,%0A %22category%22: %22Mesh%22%7D%0A%0A%0Aimport bpy%0Afrom bpy.types import Operator%0Afrom bpy.props import FloatProperty, IntProperty, BoolProperty%0A%0Aclass FractalPoke(bpy.types.Operator):%0A %22%22%22Fractal Poke%22%22%22%0A bl_idname = %22mesh.fractal_poke%22%0A bl_label = %22Fractal Poke%22%0A bl_options = %7B'REGISTER', 'UNDO'%7D%0A%0A iterations = IntProperty(%0A name = %22Iterations%22,%0A default = 3,%0A min = 1,%0A description = %22Be careful as complexity will increase exponentially%22%0A )%0A%0A start_offset = FloatProperty(%0A name = %22Start Offset%22,%0A default = 1.0,%0A description = %22Offset for first poke iteration%22%0A )%0A%0A offset_multiplier = FloatProperty(%0A name = %22Offset Multiplier%22,%0A default = 0.5,%0A description = %22Increases or decreases offset for each iteration%22%0A )%0A%0A offset_flip = BoolProperty(%0A name = %22Flip Offset%22,%0A default = False,%0A description = %22Flips offsetting inward or outward for each iteration%22%0A )%0A%0A grow_selection = BoolProperty(%0A name = %22Grow Selection%22,%0A default = False,%0A description = %22Grows selection for each iteration%22%0A )%0A%0A shrink_selection = BoolProperty(%0A name = %22Shrink Selection%22,%0A default = False,%0A description = %22Shrinks selection for each iteration%22%0A )%0A%0A def execute(self, context):%0A my_offset = self.start_offset%0A%0A for i in range(self.iterations):%0A bpy.ops.mesh.poke(offset = my_offset)%0A my_offset *= self.offset_multiplier%0A%0A if self.offset_flip:%0A my_offset *= -1%0A%0A if self.grow_selection:%0A bpy.ops.mesh.select_more()%0A%0A if self.shrink_selection:%0A bpy.ops.mesh.select_less()%0A%0A return %7B'FINISHED'%7D%0A%0A @classmethod%0A def poll(cls, context):%0A ob = context.active_object%0A return ob is not None and ob.mode == 'EDIT'%0A %0Adef register():%0A bpy.utils.register_class(FractalPoke)%0A%0Adef unregister():%0A bpy.utils.unregister_class(FractalPoke)%0A%0Aif __name__ == %22__main__%22:%0A register()%0A
|
|
2dff378e7f446e83aa7c105bded3f3330fe9fa20
|
Add a script to generate a Javascript file encoding_<enc>.js containing encoding and decoding tables for the specified <enc> encoding. Uses Unicode table at location http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/<enc>.TXT. Related to issue #1541.
|
scripts/make_encoding_js.py
|
scripts/make_encoding_js.py
|
Python
| 0 |
@@ -0,0 +1,1275 @@
+%22%22%22Create a Javascript script to encode / decode for a specific encoding%0Adescribed in a file available at%0Ahttp://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/%3CENCODING%3E.TXT%0A%22%22%22%0A%0Aimport os%0Aimport re%0Aimport json%0Aimport urllib.request%0A%0Aline_re = re.compile(%22%5E(0x%5BA-Z0-9%5D+)%5Cs+(0x%5BA-Z0-9%5D+)*%22, re.M)%0A%0Atmpl = %22http://unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/%7B%7D.TXT%22%0Aencoding = input(%22Encoding name: %22)%0Areq = urllib.request.urlopen(tmpl.format(encoding.upper()))%0Adata = req.read().decode(%22ascii%22)%0A%0Aroot_dir = os.path.dirname(os.path.dirname(__file__))%0Alibs_dir = os.path.join(root_dir, %22www%22, %22src%22, %22libs%22)%0Afilename = os.path.join(libs_dir, f%22encoding_%7Bencoding.lower()%7D.js%22)%0Awith open(filename, %22w%22, encoding=%22utf-8%22) as out:%0A out.write(%22var _table = %5B%22)%0A for line in data.split(%22%5Cn%22):%0A mo = line_re.match(line)%0A if mo:%0A key, value = mo.groups()%0A out.write(f%22%7Bkey%7D, %7Bvalue or -1%7D,%22)%0A out.write(%22%5D%5Cn%22)%0A out.write(%22var decoding_table = %5B%5D,%5Cn encoding_table = %5B%5D%5Cn%22)%0A out.write(%22%22%22for(var i = 0, len = _table.length; i %3C len; i += 2)%7B%0Avar value = _table%5Bi + 1%5D%0Aif(value !== null)%7B%0A encoding_table%5Bvalue%5D = _table%5Bi%5D%0A%7D%0Adecoding_table%5B_table%5Bi%5D%5D = _table%5Bi + 1%5D%0A%7D%0A$module = %7Bencoding_table, decoding_table%7D%0A%22%22%22)%0A
|
|
a7a0d17fdf67f2176d19e857bade1e406c39bebb
|
Update `Inventory` creation such that if a host is defined multiple times, it will receive combined data.
|
pyinfra/api/inventory.py
|
pyinfra/api/inventory.py
|
# pyinfra
# File: pyinfra/api/inventory.py
# Desc: represents a pyinfra inventory
from .host import Host
from .attrs import AttrData
class Inventory(object):
'''
Represents a collection of target hosts. Stores and provides access to group data,
host data and default data for these hosts.
Args:
names_data: tuple of ``(names, data)``
ssh_user: default SSH user
ssh_port: default SSH port
ssh_key: default SSH key filename
ssh_key_password: default password for the SSH key
ssh_password: default SSH password
**groups: map of group names -> ``(names, data)``
'''
state = None
def __init__(
self, names_data,
ssh_user=None, ssh_port=None, ssh_key=None,
ssh_key_password=None, ssh_password=None, **groups
):
names, data = names_data
self.connected_hosts = set()
self.groups = {}
self.host_data = {}
self.group_data = {}
# In CLI mode these are --user, --key, etc
self.override_data = AttrData({
'ssh_user': ssh_user,
'ssh_key': ssh_key,
'ssh_key_password': ssh_key_password,
'ssh_port': ssh_port,
'ssh_password': ssh_password
})
self.data = AttrData(data)
# Loop groups and build map of name -> groups
names_to_groups = {}
for group_name, (group_names, group_data) in groups.iteritems():
group_name = group_name.lower()
self.group_data[group_name] = AttrData(group_data)
for name in group_names:
name = name[0] if isinstance(name, tuple) else name
names_to_groups.setdefault(name, []).append(group_name)
# Build the actual Host instances
hosts = {}
for name in names:
host_data = {}
if isinstance(name, tuple):
name, host_data = name
self.host_data[name] = AttrData(host_data)
# Create the Host
host = Host(self, name, names_to_groups.get(name))
hosts[name] = host
# Push into any groups
for groupname in names_to_groups.get(name, []):
self.groups.setdefault(groupname, []).append(host)
self.hosts = hosts
def __getitem__(self, key):
'''
Get individual hosts from the inventory by name.
'''
return self.hosts.get(key)
def __getattr__(self, key):
'''
Get groups (lists of hosts) from the inventory by name.
'''
return self.groups.get(key)
def __len__(self):
'''
Returns a list of all hosts, connected or not.
'''
return len(self.hosts)
def __iter__(self):
'''
Iterates over inventory hosts. Uses connected hosts only when they exist - in that
sense can be seen as the "active" list of hosts during a deploy.
'''
for host in self.hosts.values():
if not self.connected_hosts:
yield host
elif host.name in self.connected_hosts:
yield host
def get_data(self):
'''
Get the base/all data attached to this inventory.
'''
return self.data
def get_override_data(self):
'''
Get override data for this inventory.
'''
return self.override_data
def get_host_data(self, hostname):
'''
Get data for a single host in this inventory.
'''
return self.host_data[hostname]
def get_group_data(self, group):
'''
Get data for a single group in this inventory.
'''
return self.group_data.get(group, {})
def get_groups_data(self, groups):
'''
Gets aggregated data from a list of groups. Vars are collected in order so, for
any groups which define the same var twice, the last group's value will hold.
'''
data = {}
for group in groups:
data.update(
self.get_group_data(group).dict()
)
return AttrData(data)
|
Python
| 0 |
@@ -1762,79 +1762,75 @@
ild
-the actual Host instances%0A hosts = %7B%7D%0A for name in names:
+host data%0A for name in names:%0A # Extract any data
%0A
@@ -1949,50 +1949,494 @@
-self.host_data%5Bname%5D = AttrData(host_data)
+# Ensure host has data dict%0A self.host_data.setdefault(name, %7B%7D)%0A # Give host any data%0A self.host_data%5Bname%5D.update(host_data)%0A%0A # Now we've got host data, convert -%3E AttrData%0A self.host_data = %7B%0A name: AttrData(d)%0A for name, d in self.host_data.iteritems()%0A %7D%0A%0A # Actually make Host instances%0A hosts = %7B%7D%0A for name in names:%0A name = name%5B0%5D if isinstance(name, tuple) else name
%0A%0A
|
f1c65cf208b4a6275214d82a765ad75c47c75715
|
add example of how to use KT without defines
|
examples/cuda-c++/vector_add_defines.py
|
examples/cuda-c++/vector_add_defines.py
|
Python
| 0 |
@@ -0,0 +1,932 @@
+#!/usr/bin/env python%0A%22%22%22 This is the example demonstrates how to use Kernel Tuner%0A to insert tunable parameters into template arguments%0A without using any C preprocessor defines%0A%22%22%22%0A%0Aimport numpy as np%0Aimport kernel_tuner as kt%0A%0Adef tune():%0A%0A kernel_string = %22%22%22%0Atemplate%3Ctypename T, int blockSize%3E%0A__global__ void vector_add(T *c, T *a, T *b, int n) %7B%0A auto i = blockIdx.x * blockSize + threadIdx.x;%0A if (i%3Cn) %7B%0A c%5Bi%5D = a%5Bi%5D + b%5Bi%5D;%0A %7D%0A%7D%0A%22%22%22%0A%0A size = 10000000%0A%0A a = np.random.randn(size).astype(np.float32)%0A b = np.random.randn(size).astype(np.float32)%0A c = np.zeros_like(b)%0A n = np.int32(size)%0A%0A args = %5Bc, a, b, n%5D%0A%0A tune_params = dict()%0A tune_params%5B%22block_size_x%22%5D = %5B128+64*i for i in range(15)%5D%0A%0A result, env = kt.tune_kernel(%22vector_add%3Cfloat, block_size_x%3E%22, kernel_string, size, args, tune_params, defines=%7B%7D)%0A%0A return result%0A%0A%0Aif __name__ == %22__main__%22:%0A tune()%0A
|
|
00cc1f17796897ca2f4351bbea74ee22aad98f14
|
Create quadrants_HH_HL_LH_LL.py
|
quadrants_HH_HL_LH_LL.py
|
quadrants_HH_HL_LH_LL.py
|
Python
| 0.999018 |
@@ -0,0 +1,2231 @@
+# python3 for categorizing data into 4 quadrants from 2 numerical fields%0A%0A# this case is for vis minoirty + avg income in Toronto census tracts%0A%0Aimport csv%0Aimport statistics as st%0A%0A%0A# just the toronto cts%0Ator_cts = %5B%5D%0Awith open('ct_tor.csv', 'r') as csvfile:%0A reader = csv.DictReader(csvfile)%0A for row in reader:%0A tor_cts.append(row%5B'ctuid'%5D)%0A%0Avar_1 = %5B%5D # avg inc%0Avar_2 = %5B%5D # perc vis min%0Awith open('in_inc_vis.csv', 'r') as csvfile:%0A reader = csv.DictReader(csvfile)%0A for row in reader:%0A if row%5B'ctuid'%5D in tor_cts:%0A try:%0A var_1.append(float(row%5B'avg_inc'%5D))%0A perc_vis = float(row%5B'vis_min_pop'%5D) / float(row%5B'total_pop'%5D)%0A var_2.append(perc_vis)%0A except:%0A print(row%5B'ctuid'%5D)%0A%0Aprint(len(var_1))%0Aprint(len(var_2))%0Av1b=v2b=0%0Aprint(%22----------------------------------%22)%0A# for var 1%0Aprint(%22median%22, st.median(var_1))%0Aprint(%22mean%22, st.mean(var_1))%0Aprint(%22input break value:%22)%0Av1b = float(input())%0A# for var 2%0Aprint(%22----------------------------------%22)%0Aprint(%22median%22, st.median(var_2))%0Aprint(%22mean%22, st.mean(var_2))%0Aprint(%22input break value:%22)%0Av2b = float(input())%0A%0AHHc = 0%0AHLc = 0%0ALHc = 0%0ALLc = 0%0A# break the data via the set breaks%0Awith open('in_inc_vis.csv', 'r') as csvfile:%0A reader = csv.DictReader(csvfile)%0A for row in reader:%0A if row%5B'ctuid'%5D in tor_cts:%0A try:%0A perc_vis = float(row%5B'vis_min_pop'%5D) / float(row%5B'total_pop'%5D)%0A inc = float(row%5B'avg_inc'%5D)%0A%0A # ye olde if statements%0A if inc %3E v1b and perc_vis %3E v2b:%0A q = 'HH'%0A HHc += 1%0A elif inc %3E v1b and perc_vis %3C= v2b:%0A q = 'HL'%0A HLc += 1%0A elif inc %3C= v1b and perc_vis %3E v2b:%0A q = 'LH'%0A LHc += 1%0A elif inc %3C= v1b and perc_vis %3C= v2b:%0A q = 'LL'%0A LLc += 1%0A orow = %5Brow%5B'ctuid'%5D,inc,perc_vis,q%5D%0A #print(orow)%0A except:%0A #print(row%5B'ctuid'%5D)%0A None%0A%0Aprint(%22HH%22, HHc)%0Aprint(%22LH%22, LHc)%0Aprint(%22HL%22, HLc)%0Aprint(%22LL%22, LLc)%0A
|
|
9dae55d2ef2e786799554ec2121cf9ecfe59eb62
|
Rename file
|
dnsdiff/dnsdiff.py
|
dnsdiff/dnsdiff.py
|
Python
| 0.000002 |
@@ -0,0 +1,870 @@
+'''Module to quickly look up and compare NS records for differences'''%0A%0Aimport dns.resolver%0Aimport pprint%0Aimport sys%0A%0App = pprint.PrettyPrinter(indent=4)%0A%0Adef compare_dns(nameservers, domain):%0A%09'''Compares records between nameservers using dnspython'''%0A%0A%09responses = %7B%7D%0A%09resolver = dns.resolver.Resolver(configure=False)%0A%0A%09for ns in nameservers:%0A%09%09ns_list = %5B%5D%0A%09%09resolver.nameservers = ns%0A%09%09answer = dns.resolver.query(domain, 'NS')%0A%09%09for record in answer:%0A%09%09%09ns_list.append(record.target)%0A%09%09responses%5Bns%5D = sorted(ns_list)%0A%0A%09pp.pprint(responses)%0A%09print %22Determining differences%22%0A%0A%09set_list = %5B%5D%0A%09for val in responses.values():%0A%09%09set_list.append(set(val))%0A%0A%09differences = set.difference(*set_list)%0A%0A%09if len(differences) == 0 or len(nameservers) == 1:%0A%09%09print %22No discrepancies found%22%0A%09%09sys.exit(0)%0A%09else:%0A%09%09print %22Discrepancies found!%22%0A%09%09print differences%0A%09%09sys.exit(1)%0A
|
|
bef5333edf60779f645603b3d4c7611867ad7382
|
Day25 and final day! yaaaay
|
day25/code_generator.py
|
day25/code_generator.py
|
Python
| 0.999998 |
@@ -0,0 +1,292 @@
+row = 2978%0Acolumn = 3083%0A%0Ax = 1%0Ay = 1%0A%0Avalue = 20151125%0Astep = 1%0A%0Awhile x %3C= column or y %3C= row:%0A%09if x == step and y == 1:%0A%09%09step += 1%0A%09%09y = step%0A%09%09x = 1%0A%09else:%0A%09%09x += 1%0A%09%09y -= 1%0A%09value = (value * 252533) %25 33554393%0A%0A%09if x == column and y == row:%0A%09%09print 'The code for the machine is:', value
|
|
be189d9d01f916af87b45f36ac36f7c5d302dbbf
|
add an experimental command for setting the login background image
|
kolibri/content/management/commands/background.py
|
kolibri/content/management/commands/background.py
|
Python
| 0 |
@@ -0,0 +1,2125 @@
+from __future__ import absolute_import%0Afrom __future__ import print_function%0Afrom __future__ import unicode_literals%0A%0Aimport logging%0Aimport os%0Aimport shutil%0A%0Afrom django.conf import settings%0Afrom django.core.management.base import BaseCommand%0A%0Alogger = logging.getLogger(__name__)%0A%0Aclass Command(BaseCommand):%0A%0A def add_arguments(self, parser):%0A subparsers = parser.add_subparsers(dest='command', help=%22The following subcommands are available.%22)%0A movedir_subparser = subparsers.add_parser(%0A name='set',%0A cmd=self,%0A help=%22EXPERIMENTAL: Sets the login screen background image%22%0A )%0A movedir_subparser.add_argument(%0A 'destination',%0A type=str,%0A help='Image file'%0A )%0A subparsers.add_parser(%0A name='reset',%0A cmd=self,%0A help=%22Set default%22%0A )%0A%0A def handle(self, *args, **options):%0A user_static_directory = os.path.join(settings.STATIC_ROOT, 'user_module')%0A if not os.path.exists(user_static_directory):%0A self.stderr.write(self.style.ERROR('%5CnStatic directory does not exist.'))%0A raise SystemExit(1)%0A%0A img_path = os.path.join(user_static_directory, 'background.jpg')%0A backup_img_path = os.path.join(user_static_directory, 'background-backup')%0A%0A if options%5B'command'%5D == 'set':%0A new_img_path = os.path.abspath(os.path.expanduser(options%5B'destination'%5D))%0A if not os.path.exists(new_img_path):%0A self.stderr.write(%0A self.style.ERROR('%5Cn%7B%7D does not exist.').format(options%5B'destination'%5D)%0A )%0A raise SystemExit(1)%0A%0A # Only save a backup if it didn't exist before.%0A # This should only back up the default Kolibri image.%0A if not os.path.exists(backup_img_path):%0A shutil.copy(img_path, backup_img_path)%0A%0A shutil.copy(new_img_path, img_path)%0A%0A elif options%5B'command'%5D == 'reset':%0A if os.path.exists(backup_img_path):%0A shutil.copy(backup_img_path, img_path)%0A
|
|
1f48fee7ffcef3eefa6aaedb5ca963c10bb7c58c
|
Add test case for user creation form
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/test_forms.py
|
{{cookiecutter.repo_name}}/{{cookiecutter.repo_name}}/users/test_forms.py
|
Python
| 0.000001 |
@@ -0,0 +1,881 @@
+from django.test import TestCase%0A%0Afrom users.forms import ZionsUserCreationForm%0Afrom users.models import User%0A%0A%0Aclass %7B%7Bcookiecutter.project_camel_name%7D%7DUserCreationTestCase(TestCase):%0A def setUp(self):%0A self.test_user = User.objects.create(%0A username='testuser',%0A email='[email protected]',%0A password='password'%0A )%0A%0A self.bad_form = ZionsUserCreationForm(%7B%0A 'username': 'testuser',%0A 'password1': 'password',%0A 'password2': 'password',%0A %7D)%0A%0A self.good_form = ZionsUserCreationForm(%7B%0A 'username': 'testuser2',%0A 'password1': 'password',%0A 'password2': 'password',%0A %7D)%0A%0A def test_username_good(self):%0A self.assertTrue(self.good_form.is_valid())%0A%0A def test_clean_username_bad(self):%0A self.assertFalse(self.bad_form.is_valid())%0A
|
|
616bb27db3daef8939fe706d1c41cf79f35b40fa
|
set of default rules in common module
|
common.py
|
common.py
|
Python
| 0 |
@@ -0,0 +1,553 @@
+#/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0A# Copyright (c) 2012 Denis Zalevskiy%0A# Licensed under MIT License%0A%0Aimport string%0A%0Afrom parser import *%0A%0Adef vspace(): return '%5Cn%5Cr', ignore%0Adef hspace(): return ' %5Ct', ignore%0Adef eol(): return choice(eof, vspace), ignore%0Adef space(): return ' %5Cn%5Cr%5Ct', ignore%0Adef spaces(): return r0_inf(space), ignore%0A%0Adef any_char(): return ne(eof), value%0Adef digit_dec() : return '0123456789', value%0Adef digit_hex() : return '0123456789ABCDEFabcdef', value%0Adef ascii(): return sym(lambda s: s in string.ascii_letters), value%0A
|
|
b8c4fdc1ebba18ab832160bece4ce8b391a15b7a
|
add sampled stochastic games serialization tests
|
open_spiel/python/tests/sampled_stochastic_games_test.py
|
open_spiel/python/tests/sampled_stochastic_games_test.py
|
Python
| 0 |
@@ -0,0 +1,2450 @@
+# Copyright 2019 DeepMind Technologies Ltd. All rights reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A%0Afrom __future__ import absolute_import%0Afrom __future__ import division%0Afrom __future__ import print_function%0A%0Aimport pickle%0Afrom absl.testing import absltest%0Afrom absl.testing import parameterized%0A%0Aimport numpy as np%0Aimport pyspiel%0A%0A# All games with kSampledStochastic chance mode.%0ASPIEL_SAMPLED_STOCHASTIC_GAMES_LIST = %5B%0A g for g in pyspiel.registered_games() if g.default_loadable%0A and g.chance_mode == pyspiel.GameType.ChanceMode.SAMPLED_STOCHASTIC%0A%5D%0Aassert len(SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST) %3E= 2%0A%0A%0Aclass SampledStochasticGamesTest(parameterized.TestCase):%0A def random_playout(self, state):%0A np.random.seed(0)%0A while not state.is_terminal():%0A state.apply_action(np.random.choice(state.legal_actions()))%0A return state%0A%0A @parameterized.parameters(*SPIEL_SAMPLED_STOCHASTIC_GAMES_LIST)%0A def test_stateful_game_serialization(self, game_info):%0A game = pyspiel.load_game(game_info.short_name,%0A %7B%22rng_seed%22: pyspiel.GameParameter(0)%7D)%0A # mutate game's internal RNG state%0A state = self.random_playout(game.new_initial_state())%0A deserialized_game = pickle.loads(pickle.dumps(game))%0A%0A # make sure initial states are the same after game deserialization%0A state1 = self.random_playout(game.new_initial_state())%0A d_state1 = self.random_playout(deserialized_game.new_initial_state())%0A self.assertNotEqual(str(state1), str(state))%0A self.assertEqual(str(state1), str(d_state1))%0A # try one more time%0A state2 = self.random_playout(game.new_initial_state())%0A d_state2 = self.random_playout(deserialized_game.new_initial_state())%0A self.assertNotEqual(str(state2), str(state1))%0A self.assertEqual(str(state2), str(d_state2))%0A%0A%0Aif __name__ == %22__main__%22:%0A absltest.main()%0A
|
|
ed9d640a11c02ca4b42e62d975e4ae9a2bd33093
|
add tests for simtk!
|
openpathsampling/experimental/storage/test_simtk_unit.py
|
openpathsampling/experimental/storage/test_simtk_unit.py
|
Python
| 0 |
@@ -0,0 +1,2650 @@
import pytest
import numpy as np

from ..simstore.custom_json import JSONSerializerDeserializer, DEFAULT_CODECS

from .simtk_unit import *

try:
    from simtk import unit
except ImportError:
    HAS_SIMTK = False
else:
    HAS_SIMTK = True

class TestSimtkUnitCodec(object):
    def setup(self):
        pytest.importorskip('simtk.unit')
        my_unit = unit.nanometer / unit.picosecond**2
        self.values = {
            'float': 1.0 * my_unit,
            'array': np.array([1.0, 2.0]) * my_unit,
        }
        self.serialization = JSONSerializerDeserializer(
            DEFAULT_CODECS + [simtk_quantity_codec]
        )

    @pytest.mark.parametrize('obj_type', ['float', 'array'])
    def test_serialization_cycle(self, obj_type):
        obj = self.values[obj_type]
        ser = self.serialization.serializer(obj)
        deser = self.serialization.deserializer(ser)
        reser = self.serialization.serializer(deser)
        if obj_type == 'array':
            np.testing.assert_array_equal(obj, deser)
        else:
            assert obj == deser
        assert ser == reser


class TestSimtkQuantityHandler(object):
    def setup(self):
        pytest.importorskip('simtk.unit')
        self.handlers = {
            'float': SimtkQuantityHandler(
                ('unit.nanometer/unit.picosecond**2', 'float')
            ),
            'array': SimtkQuantityHandler(
                ('unit.nanometer', 'ndarray.float32(2,3)')
            ),
        }
        self.objects = {
            'float': 1.0 * unit.nanometer / unit.picosecond**2,
            'array': np.array([[1.0, 2.0, 3.0],
                               [4.0, 5.0, 6.0]]) * unit.nanometer,
        }

    @pytest.mark.parametrize('type_str, expected', [
        (
            'simtk(unit.nanometer/unit.picosecond**2)*float',
            ('unit.nanometer/unit.picosecond**2', 'float')
        ), (
            'simtk(unit.nanometer)*ndarray.float32(3,3)',
            ('unit.nanometer', 'ndarray.float32(3,3)')
        ),
    ])
    def test_is_my_type(self, type_str, expected):
        assert SimtkQuantityHandler.is_my_type(type_str) == expected

    @pytest.mark.parametrize('obj_type', ['float', 'array'])
    def test_serialization_cycle(self, obj_type):
        handler = self.handlers[obj_type]
        obj = self.objects[obj_type]
        ser = handler.serialize(obj)
        deser = handler.deserialize(ser)
        reser = handler.serialize(deser)

        assert ser == reser
        if obj_type == 'array':
            np.testing.assert_array_equal(obj, deser)
        else:
            assert obj == deser
        assert obj.unit == deser.unit
|
|
4d85702561c000824083544de98693e244c8aab7
|
Add test for decoder stack
|
tests/test_decoding_stack.py
|
tests/test_decoding_stack.py
|
Python
| 0.000001 |
@@ -0,0 +1,2736 @@
#! /usr/bin/env python

from __future__ import division

from timeside.decoder import FileDecoder
from timeside.analyzer import AubioPitch
from timeside.core import ProcessPipe
import numpy as np
from unit_timeside import *

import os.path

#from glib import GError as GST_IOError
# HINT : to use later with Gnonlin only


class TestDecodingFromStack(unittest.TestCase):
    "Test decoder stack"

    def setUp(self):
        self.samplerate, self.channels, self.blocksize = None, None, None
        self.start = 0
        self.duration = None

        self.expected_samplerate = 44100
        self.expected_channels = 2
        self.expected_totalframes = 352800
        self.test_exact_duration = True
        self.source_duration = 8
        self.expected_mime_type = 'audio/x-wav'
        self.source = os.path.join(os.path.dirname(__file__),
                                   "samples/sweep.wav")

    def testProcess(self):
        "Test decoder stack: test process"
        decoder = FileDecoder(uri=self.source,
                              start=self.start,
                              duration=self.duration,
                              stack=True)
        self.assertTrue(decoder.stack)
        self.assertFalse(decoder.from_stack)

        pipe = ProcessPipe(decoder)

        pipe.run()

        self.assertFalse(decoder.stack)
        self.assertTrue(decoder.from_stack)

        self.assertEqual(len(pipe.frames_stack), 44)

        pipe.run()

    def testResults(self):
        "Test decoder stack: test frames content"

        decoder = FileDecoder(uri=self.source,
                              start=self.start,
                              duration=self.duration,
                              stack=True)
        pitch_on_file = AubioPitch()
        pipe = (decoder | pitch_on_file)

        pipe.run()

        self.assertIsInstance(pipe.frames_stack, list)

        pitch_results_on_file = pipe.results['aubio_pitch.pitch'].data.copy()

        # If the pipe is used for a second run, the processed frames stored
        # in the stack are passed to the other processors
        # without decoding the audio source again.
        # Let's define a second analyzer equivalent to the previous one:

        pitch_on_stack = AubioPitch()
        pipe |= pitch_on_stack
        pipe.run()

        # to assert that the frames passed to the two analyzers are the same,
        # we check that the results of these analyzers are equivalent:
        pitch_results_on_stack = pipe.results['aubio_pitch.pitch'].data

        self.assertTrue(np.array_equal(pitch_results_on_stack,
                                       pitch_results_on_file))


if __name__ == '__main__':
    unittest.main(testRunner=TestRunner())
|
|
40bf8d4773eb659ac2ac22aef50c2f63084924be
|
add profiler test case
|
rfcs/20200624-pluggable-device-for-tensorflow/sample/test_profiler.py
|
rfcs/20200624-pluggable-device-for-tensorflow/sample/test_profiler.py
|
Python
| 0.000002 |
@@ -0,0 +1,1206 @@
#!/usr/bin/env python
# coding=utf-8
import tensorflow as tf
import numpy as np
import os
tf.compat.v1.disable_eager_execution()

profile_options = tf.profiler.experimental.ProfilerOptions(
    host_tracer_level = 3,
    device_tracer_level = 1)

logpath = os.path.join('data', 'logs', 'profiler_demo')

a = tf.random.normal(shape=[1,10, 10, 8], dtype=tf.float32, seed=1)
w = tf.random.normal(shape=[3, 3, 8, 4], dtype=tf.float32, seed=1)

a1 = tf.random.normal(shape=[1, 10, 10, 8], dtype=tf.float32, seed=1)
w1 = tf.random.normal(shape=[3, 3, 8, 4], dtype=tf.float32, seed=1)


with tf.device("/MY_DEVICE:0"):
    tf.profiler.experimental.start(logpath)
    b = tf.nn.relu(a)
    c = tf.nn.conv2d(b, w, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC')
    tf.profiler.experimental.stop()
with tf.device("/CPU:0"):
    b1 = tf.nn.relu(a1)
    c1 = tf.nn.conv2d(b1, w1, strides=[1, 1, 1, 1], padding='SAME', data_format='NHWC')


sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(allow_soft_placement=False, log_device_placement=True))
print(sess.run(tf.reduce_all(tf.less(c - c1, 1e-5))))
|
|
3d0827fa805a08eaaaa07e037f6ce3da6d8e1c4e
|
add guess module
|
yoink/guess.py
|
yoink/guess.py
|
Python
| 0.000001 |
@@ -0,0 +1,1688 @@
import numpy as np
from scipy import ndimage

try:
    from skimage.feature import corner_harris
    from skimage.measure import approximate_polygon
except ImportError:
    from yoink.mini_skimage import corner_harris, approximate_polygon


def guess_corners(bw):
    """
    Infer the corners of an image using a Sobel filter to find the edges and a
    Harris filter to find the corners. Takes only a single color channel.

    Parameters
    ----------
    bw : (m x n) ndarray of ints

    Returns
    -------
    corners : pixel coordinates of plot corners
    outline : (m x n) ndarray of bools True -> plot area
    """
    e_map = ndimage.sobel(bw)

    markers = np.zeros_like(bw)
    markers[bw < 30] = 1
    markers[bw > 150] = 2
    seg = ndimage.watershed_ift(e_map, np.asarray(markers, dtype=int))

    outline = ndimage.binary_fill_holes(1-seg)
    corners = corner_harris(np.asarray(outline, dtype=int))
    corners = approximate_polygon(corners, 1)
    return corners, outline


def get_angle(p1, p2):
    return np.arctan2(p1[0]-p2[0], p1[1]-p2[1]) * 180./np.pi


def get_angle2(corners):
    order = np.argsort(corners[:, 0])
    top = corners[order[:2]]
    bot = corners[order[2:]]

    order = np.argsort(corners[:, 1])
    left = corners[order[:2]]
    right = corners[order[2:]]

    angles = [get_angle(top[0, :], top[1, :]),
              get_angle(bot[0, :], bot[1, :]),
              get_angle(left[0, :], left[1, :]) + 90,
              get_angle(right[0, :], right[1, :]) + 90,
              ]
    angle = sum(angles) / len(angles)
    return angle


def clear_border(im, outline):
    im_fixed = im.copy()
    im_fixed[-outline] = 255
    return im_fixed
|
|
4224761522c1e058979f3901f9af1d037398576c
|
Add cache_key method to be used by Django 1.7
|
django_mobile/loader.py
|
django_mobile/loader.py
|
import hashlib
from django.template import TemplateDoesNotExist
from django.template.loader import find_template_loader, BaseLoader
from django.template.loader import get_template_from_string
from django.template.loaders.cached import Loader as DjangoCachedLoader
from django_mobile import get_flavour
from django_mobile.conf import settings
class Loader(BaseLoader):
is_usable = True
def __init__(self, *args, **kwargs):
loaders = []
for loader_name in settings.FLAVOURS_TEMPLATE_LOADERS:
loader = find_template_loader(loader_name)
if loader is not None:
loaders.append(loader)
self.template_source_loaders = tuple(loaders)
super(BaseLoader, self).__init__(*args, **kwargs)
def get_template_sources(self, template_name, template_dirs=None):
template_name = self.prepare_template_name(template_name)
for loader in self.template_source_loaders:
if hasattr(loader, 'get_template_sources'):
try:
for result in loader.get_template_sources(
template_name,
template_dirs):
yield result
except UnicodeDecodeError:
# The template dir name was a bytestring that wasn't valid UTF-8.
raise
except ValueError:
# The joined path was located outside of this particular
# template_dir (it might be inside another one, so this isn't
# fatal).
pass
def prepare_template_name(self, template_name):
template_name = u'%s/%s' % (get_flavour(), template_name)
if settings.FLAVOURS_TEMPLATE_PREFIX:
template_name = settings.FLAVOURS_TEMPLATE_PREFIX + template_name
return template_name
def load_template(self, template_name, template_dirs=None):
template_name = self.prepare_template_name(template_name)
for loader in self.template_source_loaders:
try:
return loader(template_name, template_dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist("Tried %s" % template_name)
def load_template_source(self, template_name, template_dirs=None):
template_name = self.prepare_template_name(template_name)
for loader in self.template_source_loaders:
if hasattr(loader, 'load_template_source'):
try:
return loader.load_template_source(
template_name,
template_dirs)
except TemplateDoesNotExist:
pass
raise TemplateDoesNotExist("Tried %s" % template_name)
class CachedLoader(DjangoCachedLoader):
is_usable = True
def load_template(self, template_name, template_dirs=None):
key = "{0}:{1}".format(get_flavour(), template_name)
if template_dirs:
# If template directories were specified, use a hash to differentiate
key = '-'.join([
template_name,
hashlib.sha1('|'.join(template_dirs)).hexdigest()])
if key not in self.template_cache:
template, origin = self.find_template(template_name, template_dirs)
if not hasattr(template, 'render'):
try:
template = get_template_from_string(template, origin, template_name)
except TemplateDoesNotExist:
# If compiling the template we found raises TemplateDoesNotExist,
# back off to returning the source and display name for
# the template we were asked to load. This allows for
# correct identification (later) of the actual template
# that does not exist.
return template, origin
self.template_cache[key] = template
return self.template_cache[key], None
|
Python
| 0 |
@@ -335,16 +335,62 @@
ettings%0A
+from django.utils.encoding import force_bytes%0A
%0A%0Aclass
@@ -2927,32 +2927,367 @@
_usable = True%0A%0A
+    def cache_key(self, template_name, template_dirs):
+        if template_dirs:
+            key = '-'.join([
+                template_name,
+                hashlib.sha1(force_bytes('|'.join(template_dirs))).hexdigest()
+            ])
+        else:
+            key = template_name
+
+        return '{0}:{1}'.format(get_flavour(), key)
+
def load_tem
|
4ef2344b3abf3d8c0542ffd97425557ae092f21d
|
handle ZeroDivisionError
|
tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py
|
tensorflow/python/data/experimental/benchmarks/map_defun_benchmark.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Benchmarks for MapDefunOp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.data.experimental.ops import map_defun
from tensorflow.python.data.benchmarks import benchmark_base
from tensorflow.python.eager import function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import map_fn
from tensorflow.python.ops import math_ops
class MapDefunBenchmark(benchmark_base.DatasetBenchmarkBase):
"""Benchmarks for MapDefunOp."""
def _run(self, op, name=None, num_iters=3000):
wall_time = self.run_op_benchmark(
op=op,
iters=num_iters,
warmup=True
)
self.report_benchmark(
name=name,
iters=num_iters,
wall_time=wall_time,
extras={"examples_per_sec": float(1 / wall_time)})
def benchmark_defun_vs_map_fn(self):
"""Benchmarks to compare the performance of MapDefun vs tf.map_fn."""
@function.defun(input_signature=[tensor_spec.TensorSpec([], dtypes.int32)])
def defun(x):
return array_ops.identity(x)
def fn(x):
return array_ops.identity(x)
base = math_ops.range(10000)
for input_size in [10, 100, 1000, 10000]:
num_iters = 10000 // input_size
map_defun_op = map_defun.map_defun(defun, [base], [dtypes.int32], [()])
map_fn_op = map_fn.map_fn(fn, base)
self._run(
op=map_defun_op,
name="with_defun_size_%d" % input_size,
num_iters=num_iters
)
self._run(
op=map_fn_op,
name="without_defun_size_%d" % input_size,
num_iters=num_iters
)
if __name__ == "__main__":
benchmark_base.test.main()
|
Python
| 0.000006 |
@@ -1472,24 +1472,105 @@
=True%0A )%0A
+ zero_division_delta = 1e-100%0A wall_time = wall_time + zero_division_delta%0A
self.rep
@@ -1696,19 +1696,19 @@
ec%22:
+ 1 /
float(
-1 /
wall
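Decoded, the hunks above add a tiny delta to wall_time before taking its reciprocal for the examples_per_sec extra. A standalone sketch of the same guard (function name assumed for illustration):

def examples_per_sec(wall_time, zero_division_delta=1e-100):
    # a ~0s measurement no longer raises ZeroDivisionError
    return 1 / float(wall_time + zero_division_delta)

assert examples_per_sec(0.0) > 0  # yields 1e+100 rather than an exception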
|
ef96c4e1a27289f5cdad5de78ee2a2dfc1b91bd0
|
Create network-delay-time.py
|
Python/network-delay-time.py
|
Python/network-delay-time.py
|
Python
| 0.000037 |
@@ -0,0 +1,777 @@
# Time:  O((|E| + |V|) * log|V|)
# Space: O(|E| + |V|)

import heapq

# Dijkstra's algorithm
class Solution(object):
    def networkDelayTime(self, times, N, K):
        """
        :type times: List[List[int]]
        :type N: int
        :type K: int
        :rtype: int
        """
        min_heap = []
        adj = [[] for _ in xrange(N)]
        for u, v, w in times:
            adj[u-1].append((v-1, w))

        lookup, result = set(), 0
        heapq.heappush(min_heap, (0, K-1))
        while min_heap and len(lookup) != N:
            result, u = heapq.heappop(min_heap)
            lookup.add(u)
            for v, w in adj[u]:
                if v in lookup: continue
                heapq.heappush(min_heap, (result+w, v))
        return result if len(lookup) == N else -1
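A quick check of the solution above on the standard example graph (editor's example, not part of the commit; Python 2 syntax to match the xrange call):

# Signal starts at node 2; nodes 1 and 3 are reached at t=1, node 4 at t=2.
print Solution().networkDelayTime([[2, 1, 1], [2, 3, 1], [3, 4, 1]], 4, 2)  # -> 2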
|
|
842d7337f236d94d1b7ed70aaa98eff73b4000cd
|
Create pyside_houdini.py
|
pyside_houdini.py
|
pyside_houdini.py
|
Python
| 0 |
@@ -0,0 +1,2485 @@
"""
This module helps you use PyQt in Houdini's GUI by integrating PyQt's event
loop into Houdini's. Replace calls to QApplication.exec_() in your
code with calls to pyqt_houdini.exec_(app).
"""
from email.mime import image

import hou
from PySide import QtCore
from PySide import QtGui

class IntegratedEventLoop(object):
    """This class behaves like QEventLoop except it allows PyQt to run inside
    Houdini's event loop on the main thread. You probably just want to
    call exec_() below instead of using this class directly.
    """
    def __init__(self, application, dialogs):
        # We need the application to send posted events. We hold a reference
        # to any dialogs to ensure that they don't get garbage collected
        # (and thus close in the process). The reference count for this object
        # will go to zero when it removes itself from Houdini's event loop.
        self.application = application
        self.dialogs = dialogs
        self.event_loop = QtCore.QEventLoop()

    def exec_(self):
        hou.ui.addEventLoopCallback(self.processEvents)

    def processEvents(self):
        # There is no easy way to know when the event loop is done. We can't
        # use QEventLoop.isRunning() because it always returns False since
        # we're not inside QEventLoop.exec_(). We can't rely on a
        # lastWindowClosed signal because the window is usually made invisible
        # instead of closed. Instead, we need to explicitly check if any top
        # level widgets are still visible.
        if not anyQtWindowsAreOpen():
            hou.ui.removeEventLoopCallback(self.processEvents)

        self.event_loop.processEvents()
        self.application.sendPostedEvents(None, 0)

def anyQtWindowsAreOpen():
    return any(w.isVisible() for w in QtGui.QApplication.topLevelWidgets())

def exec_(application, *args):
    """You cannot call QApplication.exec_, or Houdini will freeze while PyQt
    waits for and processes events. Instead, call this function to allow
    Houdini's and PyQt's event loops to coexist. Pass in any dialogs as
    extra arguments, if you want to ensure that something holds a reference
    to them while the event loop runs.

    This function returns right away.
    """
    IntegratedEventLoop(application, args).exec_()

def execSynchronously(application, *args):
    """This function is like exec_, except it will not return until all PyQt
    windows have closed. Houdini will remain responsive while the PyQt window
    is open.
    """
    exec_(application, *args)
    hou.ui.waitUntil(lambda: not anyQtWindowsAreOpen())
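A minimal usage sketch under the module's own contract (dialog contents assumed; meant for Houdini's Python shell):

app = QtGui.QApplication.instance() or QtGui.QApplication(['houdini'])
dialog = QtGui.QDialog()
dialog.show()
exec_(app, dialog)  # returns immediately; Houdini keeps driving the Qt event loop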
|
|
90ef0ed82a4d22f277ccc0c3275f0a07189fadc0
|
Make title pictures.
|
title_pics.py
|
title_pics.py
|
Python
| 0.000006 |
@@ -0,0 +1,2303 @@
# -*- coding: utf-8 -*-
#
# title_pics.py
#
# purpose:  Create map and time-series for title
# author:   Filipe P. A. Fernandes
# e-mail:   ocefpaf@gmail
# web:      http://ocefpaf.github.io/
# created:  20-Jan-2015
# modified: Tue 20 Jan 2015 11:18:15 AM BRT
#
# obs:
#


import matplotlib
import numpy as np
import cartopy.crs as ccrs
import matplotlib.pyplot as plt
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER

matplotlib.style.use('ggplot')

def make_map(projection=ccrs.PlateCarree(),
             extent=[-43.5, -32.5, -24.5, -14.5]):
    subplot_kw = dict(projection=projection)
    fig, ax = plt.subplots(figsize=(3.25, 3.25), subplot_kw=subplot_kw)
    ax.set_extent(extent)
    #gl = ax.gridlines(draw_labels=True)
    #gl.xlabels_top = gl.ylabels_right = False
    #gl.xformatter = LONGITUDE_FORMATTER
    #gl.yformatter = LATITUDE_FORMATTER
    return fig, ax


def fake_tide(t, M2amp, M2phase, S2amp, S2phase, randamp):
    """
    Generate a minimally realistic-looking fake semidiurnal tide.

    t is time in hours
    phases are in radians
    """
    out = M2amp * np.sin(2 * np.pi * t / 12.42 - M2phase)
    out += S2amp * np.sin(2 * np.pi * t / 12.0 - S2phase)
    out += randamp * np.random.randn(len(t))
    return out


if __name__ == '__main__':
    # Map.
    layer = 'BlueMarble_ShadedRelief_Bathymetry'
    url = 'http://map1c.vis.earthdata.nasa.gov/wmts-geo/wmts.cgi'

    fig, ax = make_map()
    ax.add_wmts(url, layer)
    ax.axis('off')

    fig.savefig('map.png', format='png', dpi=72, orientation='portrait',
                transparent=True)

    # Time-series.
    t = np.arange(500)
    u = fake_tide(t, 2.2, 0.3, 1, .3, 0.4)
    v = fake_tide(t, 1.1, 0.3 + np.pi / 2, 0.6, 0.3 + np.pi / 2, 0.4)

    fig, ax = plt.subplots(figsize=(3.25, 3.25))
    legendkw = dict(loc='lower right', fancybox=True, fontsize='small')

    kw = dict(alpha=0.5, linewidth=2.5)
    ax.plot(t, u, label='U', color='cornflowerblue', **kw)
    ax.plot(t, v, label='V', color='lightsalmon', **kw)
    ax.axis([200, 500, -8, 8])

    # Keep the y tick labels from getting too crowded.
    ax.locator_params(axis='y', nbins=5)
    ax.axis('off')

    fig.savefig('timeSeries.png', format='png', dpi=72, orientation='portrait',
                transparent=True)
|
|
84153b0be78998ab8ec6914df8623c99255457b5
|
Improve code for creating temporary locustfiles that can be used in tests
|
locust/test/mock_locustfile.py
|
locust/test/mock_locustfile.py
|
Python
| 0 |
@@ -0,0 +1,1224 @@
import os
import random
import time

from contextlib import contextmanager


MOCK_LOUCSTFILE_CONTENT = '''
"""This is a mock locust file for unit testing"""

from locust import HttpLocust, TaskSet, task, between


def index(l):
    l.client.get("/")

def stats(l):
    l.client.get("/stats/requests")


class UserTasks(TaskSet):
    # one can specify tasks like this
    tasks = [index, stats]


class LocustSubclass(HttpLocust):
    host = "http://127.0.0.1:8089"
    wait_time = between(2, 5)
    task_set = UserTasks


class NotLocustSubclass():
    host = "http://localhost:8000"

'''

class MockedLocustfile:
    __slots__ = ["filename", "directory", "file_path"]


@contextmanager
def mock_locustfile(filename_prefix="mock_locustfile", content=MOCK_LOUCSTFILE_CONTENT):
    mocked = MockedLocustfile()
    mocked.directory = os.path.dirname(os.path.abspath(__file__))
    mocked.filename = "%s_%s_%i.py" % (
        filename_prefix,
        str(time.time()).replace(".", "_"),
        random.randint(0, 100000),
    )
    mocked.file_path = os.path.join(mocked.directory, mocked.filename)
    with open(mocked.file_path, 'w') as file:
        file.write(content)

    yield mocked
    os.remove(mocked.file_path)
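A usage sketch of the context manager above (assumed, not from the commit); the temporary locustfile only exists inside the with block:

with mock_locustfile() as mocked:
    assert os.path.exists(mocked.file_path)  # e.g. .../mock_locustfile_<timestamp>_<rand>.py
# the file has been removed again at this point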
|
|
fea7f350ce711d183fd9011c43ca68fff88400eb
|
Add cython compile util
|
utils/cython_compile_libs.py
|
utils/cython_compile_libs.py
|
Python
| 0.000001 |
@@ -0,0 +1,1250 @@
#!/bin/env python
from __future__ import division, absolute_import, with_statement, print_function, unicode_literals
import os
import sys
import shutil
from pyximport.pyxbuild import pyx_to_dll
WD = os.path.dirname(os.path.dirname((os.path.abspath(__file__))))
LIBS = os.path.join(WD, 'libs')
# Adds the libs directory to the path
sys.path.append(LIBS)
import common


def main():
    filelist = common.parseFileList([LIBS], recursive=True)
    try:
        import pefile
        filepath = pefile.__file__[:-1]
        filelist.append(filepath)
    except:
        print('pefile not installed...')
    for filename in filelist:
        if filename.endswith('.py'):
            filename = str(filename)
            try:
                pyx_to_dll(filename, inplace=True)
                print(filename, 'successful!')
            except Exception as e:
                print('ERROR:', filename, 'failed')
                try:
                    os.remove(filename[:-2] + 'c')
                except:
                    pass

    # Cleanup build dirs
    walk = os.walk(LIBS)
    for path in walk:
        path = path[0]
        if os.path.basename(path) == '_pyxbld' and os.path.isdir(path):
            shutil.rmtree(path)


if __name__ == '__main__':
    main()
|
|
bdb841c99b443ab65ad30acc3ab5a88f5e3d7411
|
Correct version of build.gradle for building binaries
|
python/helpers/pydev/build_tools/build_binaries_windows.py
|
python/helpers/pydev/build_tools/build_binaries_windows.py
|
'''
Creating the environments needed for building the pre-compiled distribution on Windows:
1. Download:
* conda32 at C:\tools\Miniconda32
* conda64 at C:\tools\Miniconda
Create the environments:
C:\tools\Miniconda32\Scripts\conda create -y -f -n py27_32 python=2.7 cython numpy nose ipython pip
C:\tools\Miniconda32\Scripts\activate py27_32
pip install "django>=1.7,<1.8"
pip install -U "setuptools>=0.9"
pip install -U "pip>=1.4" "wheel>=0.21" twine
deactivate
C:\tools\Miniconda32\Scripts\conda create -y -f -n py34_32 python=3.4 cython numpy nose ipython pip
C:\tools\Miniconda32\Scripts\activate py34_32
pip install "django>=1.9"
pip install -U "setuptools>=0.9"
pip install -U "pip>=1.4" "wheel>=0.21" twine
deactivate
C:\tools\Miniconda32\Scripts\conda create -y -f -n py35_32 python=3.5 cython numpy nose ipython pip
C:\tools\Miniconda32\Scripts\activate py35_32
pip install "django>=1.9"
pip install -U "setuptools>=0.9"
pip install -U "pip>=1.4" "wheel>=0.21" twine
deactivate
C:\tools\Miniconda\Scripts\conda create -y -f -n py27_64 python=2.7 cython numpy nose ipython pip
C:\tools\Miniconda\Scripts\activate py27_64
pip install "django>=1.7,<1.8"
pip install -U "setuptools>=0.9"
pip install -U "pip>=1.4" "wheel>=0.21" twine
deactivate
C:\tools\Miniconda\Scripts\conda create -y -f -n py34_64 python=3.4 cython numpy nose ipython pip
C:\tools\Miniconda\Scripts\activate py34_64
pip install "django>=1.9"
pip install -U "setuptools>=0.9"
pip install -U "pip>=1.4" "wheel>=0.21" twine
deactivate
C:\tools\Miniconda\Scripts\conda create -y -f -n py35_64 python=3.5 cython numpy nose ipython pip
C:\tools\Miniconda\Scripts\activate py35_64
pip install "django>=1.9"
pip install -U "setuptools>=0.9"
pip install -U "pip>=1.4" "wheel>=0.21" twine
deactivate
'''
from __future__ import unicode_literals
import os
import subprocess
import sys
miniconda32_envs = getattr(os.environ, 'MINICONDA32_ENVS', r'C:\tools\Miniconda32\envs')
miniconda64_envs = getattr(os.environ, 'MINICONDA64_ENVS', r'C:\tools\Miniconda\envs')
python_installations = [
r'%s\py27_32\python.exe' % miniconda32_envs,
r'%s\py34_32\python.exe' % miniconda32_envs,
r'%s\py35_32\python.exe' % miniconda32_envs,
r'%s\py27_64\python.exe' % miniconda64_envs,
r'%s\py34_64\python.exe' % miniconda64_envs,
r'%s\py35_64\python.exe' % miniconda64_envs,
]
root_dir = os.path.dirname(os.path.dirname(__file__))
def list_binaries():
for f in os.listdir(os.path.join(root_dir, '_pydevd_bundle')):
if f.endswith('.pyd'):
yield f
def extract_version(python_install):
return python_install.split('\\')[-2][2:]
def main():
from generate_code import generate_dont_trace_files
from generate_code import generate_cython_module
# First, make sure that our code is up to date.
generate_dont_trace_files()
generate_cython_module()
for python_install in python_installations:
assert os.path.exists(python_install)
from build import remove_binaries
remove_binaries()
for f in list_binaries():
raise AssertionError('Binary not removed: %s' % (f,))
for i, python_install in enumerate(python_installations):
new_name = 'pydevd_cython_%s_%s' % (sys.platform, extract_version(python_install))
args = [
python_install, os.path.join(root_dir, 'build_tools', 'build.py'), '--no-remove-binaries', '--target-pyd-name=%s' % new_name, '--force-cython']
if i != 0:
args.append('--no-regenerate-files')
print('Calling: %s' % (' '.join(args)))
subprocess.check_call(args)
if __name__ == '__main__':
main()
'''
To run do:
cd /D x:\PyDev.Debugger
set PYTHONPATH=x:\PyDev.Debugger
C:\tools\Miniconda32\envs\py27_32\python build_tools\build_binaries_windows.py
'''
|
Python
| 0 |
@@ -1876,36 +1876,26 @@
_envs =
-getattr(os.environ,
+os.getenv(
'MINICON
@@ -1959,28 +1959,18 @@
s =
-getattr(os.environ,
+os.getenv(
'MIN
|
d31f63a914877fe12d66497bdbc7dd6d871672fc
|
add solution for Best Time to Buy and Sell Stock
|
src/bestTimeToBuyAndSellStock.py
|
src/bestTimeToBuyAndSellStock.py
|
Python
| 0 |
@@ -0,0 +1,371 @@
class Solution:
    # @param prices, a list of integer
    # @return an integer

    def maxProfit(self, prices):
        n = len(prices)
        if n < 2:
            return 0
        min_price = prices[0]
        res = 0
        for i in xrange(1, n):
            res = max(res, prices[i]-min_price)
            min_price = min(min_price, prices[i])
        return res
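A worked check of the single-pass scan (editor's example): for prices [7, 1, 5, 3, 6, 4] the running minimum dips to 1 and the best spread is 6 - 1 = 5.

print Solution().maxProfit([7, 1, 5, 3, 6, 4])  # 5 (Python 2, matching the xrange above)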
|
|
595c8fad76696240f96e61d9a2299de3d6cda16a
|
Add utility for walking etree and yielding nodes if options class type match.
|
skcode/utility/walketree.py
|
skcode/utility/walketree.py
|
Python
| 0 |
@@ -0,0 +1,563 @@
"""
SkCode utility for walking across a document tree.
"""


def walk_tree_for_cls(tree_node, opts_cls):
    """
    Walk the tree and yield any tree node matching the given options class.
    :param tree_node: The current tree node instance.
    :param opts_cls: The options class to search for.
    """

    # Check the current tree node first
    if isinstance(tree_node.opts, opts_cls):
        yield tree_node

    # Check all children nodes
    for child in tree_node.children:
        for node in walk_tree_for_cls(child, opts_cls):
            yield node
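A self-contained toy run of the generator above (node and options classes assumed purely for illustration):

class _Opts(object): pass
class _BoldOpts(_Opts): pass

class _Node(object):
    def __init__(self, opts, children=()):
        self.opts = opts
        self.children = list(children)

tree = _Node(_Opts(), [_Node(_BoldOpts()),
                       _Node(_Opts(), [_Node(_BoldOpts())])])
print(len(list(walk_tree_for_cls(tree, _BoldOpts))))  # 2 matching nodes, preorder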
|
|
4e50597100b5e84b1ed3c304a3a7323e7bab7918
|
Create removeSequence.py
|
removeSequence.py
|
removeSequence.py
|
Python
| 0.000001 |
@@ -0,0 +1,2258 @@
#!/usr/bin/python

###############################################################################
#
# removeSequence.py version 1.0
#
# Removes a specified nucleotide sequence from the beginning of a larger sequence
#
# Useful for preparing FASTA files for certain processing pipelines that do not
# allow for distal barcodes or primers
#
# Copyright (C) 2014 Evan Denmark
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
###############################################################################



import argparse

parser = argparse.ArgumentParser(description = ' ')
parser.add_argument('fasta', help='fasta file with adaptor sequences')
parser.add_argument('adaptor_sequence', help='string of nucleotides')

fasta = parser.parse_args().fasta
adaptor_sequence = parser.parse_args().adaptor_sequence


def remove_fusion_adaptors(fasta, adaptor_sequence):
    """
    Removes the fusion adaptor at the beginning of each sequence of a FASTA file
    """
    fasta = str(fasta)
    old_file = open(fasta, 'r')
    new_file = open('new_'+fasta+'.fasta', 'w')
    length_adaptor = len(adaptor_sequence)
    for each_line in old_file:
        each_line = str(each_line)
        if each_line[0] == '>':
            # name line
            new_file.write(each_line)
        else:
            # sequence line
            if each_line[:(length_adaptor)] == adaptor_sequence:
                current_line = each_line[:].rstrip('\n').lstrip()
                current_line = str(current_line)
                adaptor_sequence = adaptor_sequence[:length_adaptor]
                new_line = current_line[length_adaptor:]
                new_file.write(new_line+'\n')

    old_file.close()
    new_file.close()

remove_fusion_adaptors(fasta, adaptor_sequence)
|
|
b7d15547bd88c6304c5d8ceb1f74481cb4d162e7
|
Add parser hacking example
|
repeat_n_times.py
|
repeat_n_times.py
|
Python
| 0.000002 |
@@ -0,0 +1,1026 @@
# -*- encoding: utf-8 -*-

from jinja2 import Environment
from jinja2.ext import Extension
from jinja2 import nodes


class RepeatNTimesExtension(Extension):

    tags = {"repeat"}

    def parse(self, parser):
        lineno = next(parser.stream).lineno
        index = nodes.Name("_", "store", lineno=lineno)
        how_many_times = parser.parse_expression()
        iterable = nodes.Call(nodes.Name("range", "load"), [how_many_times], [], None, None)
        parser.stream.expect("name:times")
        body = parser.parse_statements(["name:endrepeat"], drop_needle=True)
        return nodes.For(index, iterable, body, [], None, False, lineno=lineno)


if __name__ == "__main__":
    env = Environment()
    env.add_extension(RepeatNTimesExtension)
    template = env.from_string(u"""
    {%- repeat 3 times -%}
        {% if not loop.first and not loop.last %}, {% endif -%}
        {% if loop.last %} и ещё раз {% endif -%}
        учиться
    {%- endrepeat -%}
    """)
    print(template.render())
|
|
e3365aa8d9f5e49d3aff732d169c22a46ef22904
|
Create viriback_tracker.py (#452)
|
plugins/feeds/public/viriback_tracker.py
|
plugins/feeds/public/viriback_tracker.py
|
Python
| 0 |
@@ -0,0 +1,1791 @@
import logging
from dateutil import parser
from datetime import timedelta, datetime

from core import Feed
from core.errors import ObservableValidationError
from core.observables import Url, Ip


class ViriBackTracker(Feed):
    default_values = {
        "frequency": timedelta(hours=24),
        "name": "ViriBackTracker",
        "source": "http://tracker.viriback.com/dump.php",
        "description":
            "Malware C2 Urls and IPs",
    }

    def update(self):
        for line in self.update_csv(delimiter=',', quotechar='"'):
            if not line or line[0].startswith(("Family", "#")):
                continue

            family, url, ip, first_seen = line
            first_seen = parser.parse(first_seen)
            if self.last_run is not None:
                if self.last_run > first_seen:
                    continue

            self.analyze(family, url, ip, first_seen)

    def analyze(self, family, url, ip, first_seen):

        url_obs = False
        ip_obs = False

        family = family.lower()
        context = {
            'first_seen': first_seen,
            'source': self.name
        }

        if url:
            try:
                url_obs = Url.get_or_create(value=url)
                url_obs.add_context(context)
                url_obs.add_source(self.name)
                url_obs.tag(["c2", family])
            except ObservableValidationError as e:
                logging.error(e)

        if ip:
            try:
                ip_obs = Ip.get_or_create(value=ip)
                ip_obs.add_context(context)
                ip_obs.tag(family.lower())
            except ObservableValidationError as e:
                logging.error(e)

        if url_obs and ip_obs:
            url_obs.active_link_to(ip_obs, 'ip', self.name)
|
|
5a5c30e701220cc874d08a442af0e81d2020aacf
|
bump dev version
|
symposion/__init__.py
|
symposion/__init__.py
|
__version__ = "1.0b1.dev42"
|
Python
| 0 |
@@ -22,7 +22,7 @@
dev4
-2
+3
%22%0A
|
85336dfed46145c36307f218612db7c4d8dbf637
|
bump version
|
symposion/__init__.py
|
symposion/__init__.py
|
__version__ = "1.0b1.dev17"
|
Python
| 0 |
@@ -22,7 +22,7 @@
dev1
-7
+8
%22%0A
|
c642a32b1aff0c9adc8e62aad8ceb7e0396512ed
|
bump version
|
symposion/__init__.py
|
symposion/__init__.py
|
__version__ = "1.0b1.dev13"
|
Python
| 0 |
@@ -22,7 +22,7 @@
dev1
-3
+4
%22%0A
|
c36a954dbdfcca6e520dca6b96c1c97f496880ca
|
Add test for forcefield_labeler
|
smarty/tests/test_forcefield_labeler.py
|
smarty/tests/test_forcefield_labeler.py
|
Python
| 0 |
@@ -0,0 +1,1249 @@
from functools import partial
import smarty
import openeye
from openeye.oechem import *
import os
from smarty.utils import get_data_filename
import numpy as np
from smarty.forcefield_labeler import *


def test_read_ffxml():
    """Test reading of ffxml files.
    """
    labeler = ForceField_labeler(get_data_filename('forcefield/Frosst_AlkEtOH.ffxml'))

def test_molecule_labeling(verbose = False):
    """Test using ForceField_labeler to provide force terms applied to an oemol."""
    mol = OEMol()
    OEParseSmiles(mol, 'CCC')
    OEAddExplicitHydrogens(mol)
    labeler = ForceField_labeler(get_data_filename('forcefield/Frosst_AlkEtOH.ffxml'))
    labels = labeler.labelMolecules([mol], verbose = verbose)

    # Check that force terms aren't empty
    if not 'HarmonicBondForce' in labels[0].keys():
        raise Exception("No force term assigned for harmonic bonds.")
    if not 'HarmonicAngleForce' in labels[0].keys():
        raise Exception("No force term assigned for harmonic angles.")
    if not 'PeriodicTorsionForce' in labels[0].keys():
        raise Exception("No force term assigned for periodic torsions.")
    if not 'NonbondedForce' in labels[0].keys():
        raise Exception("No nonbonded force term assigned.")
|
|
7d1fde66e0fd6b3b8cc9876e0d3271d6776b347f
|
convert tiffs to video added
|
image_to_video.py
|
image_to_video.py
|
Python
| 0.001017 |
@@ -0,0 +1,1447 @@
# -*- coding: utf-8 -*-
"""
Created on Tue May 15 16:11:55 2018

@author: LaVision
"""

#!/usr/local/bin/python3

import cv2
import argparse
import os

# Construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-ext", "--extension", required=False, default='png', help="extension name. default is 'tif'.")
ap.add_argument("-o", "--output", required=False, default='output.mp4', help="output video file")
args = vars(ap.parse_args())

# Arguments
dir_path = '.'
ext = args['extension']
output = args['output']

images = []
for f in os.listdir(dir_path):
    if f.endswith(ext):
        images.append(f)

# Determine the width and height from the first image
image_path = os.path.join(dir_path, images[0])
frame = cv2.imread(image_path)
cv2.imshow('video', frame)
height, width, channels = frame.shape

# Define the codec and create VideoWriter object
fourcc = cv2.VideoWriter_fourcc(*'mp4v')  # Be sure to use lower case
out = cv2.VideoWriter(output, fourcc, 20.0, (width, height))

for image in images:

    image_path = os.path.join(dir_path, image)
    print("processing", image_path)
    frame = cv2.imread(image_path)

    out.write(frame)  # Write out frame to video

    cv2.imshow('video', frame)
    if (cv2.waitKey(1) & 0xFF) == ord('q'):  # Hit `q` to exit
        break

# Release everything if job is finished
out.release()
cv2.destroyAllWindows()

print("The output video is {}".format(output))
|
|
f67514bf9ed193c0a8ac68c2258913bb54df8a88
|
Create save_py_source.py
|
save_py_source.py
|
save_py_source.py
|
Python
| 0 |
@@ -0,0 +1,1083 @@
import datetime, os, zipfile

exts = '.py .pyui'.split()
zip_file_name = 'aa_source_code_%Y_%m_%d_%H_%M_%S.zip'
zip_file_name = datetime.datetime.strftime(datetime.datetime.now(), zip_file_name)

def get_filenames(in_dir=None):
    def visit(_, dirname, names):
        for name in names:
            filename = os.path.join(dirname, name)
            if os.path.isfile(filename):
                filenames.append(filename)

    in_dir = in_dir or os.curdir
    filenames = []
    os.path.walk(in_dir or os.curdir, visit, None)
    return filenames

filenames = get_filenames()
if exts:
    # os.path.splitext() returns extensions with the leading dot, so the
    # entries in exts carry the dot as well
    filenames = [fn for fn in filenames if os.path.splitext(fn)[1] in exts]
file_count = len(filenames)
print('{} files found.'.format(file_count))
if filenames:
    with zipfile.ZipFile(zip_file_name, 'w') as zip_file:
        for i, filename in enumerate(filenames):
            zip_file.write(filename)
            if not i % 50:
                print('{} of {}: {}'.format(i, file_count, filename))
print('{}\n{} files copied into zip file: "{}".'.format('=' * 13, file_count, zip_file_name))
|
|
2519e7c8289a6045208013b0958fc4c9f49ff39a
|
lexographic permutations: python
|
lexographic_permutations/python/lexographic_permutations.py
|
lexographic_permutations/python/lexographic_permutations.py
|
Python
| 0.999828 |
@@ -0,0 +1,332 @@
import itertools

def permute(input):
    if len(input) == 2:
        return [input, input[::-1]]

    permutations = []
    for i in range(0, len(input)):
        permutations.append(map(lambda x: input[i]+x, permute(input[:i]+input[i+1:])))
    return sum(permutations, [])

def prepend(str1, str2):
    return str1+str2

print permute("0123456789")[999999]
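The unused itertools import hints at the shorter standard-library route; a hedged equivalent, relying on permutations of sorted input being yielded in lexicographic order:

print ''.join(list(itertools.permutations("0123456789"))[999999])  # 2783915460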
|
|
c7c02febb43eb2466484f5c99d6dcc2d60e67e09
|
add docker.py
|
zblogsite/settings/docker.py
|
zblogsite/settings/docker.py
|
Python
| 0.000004 |
@@ -0,0 +1,776 @@
from .base import *

DEBUG = True

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',  # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': 'zblog',  # Or path to database file if using sqlite3.
        # The following settings are not used with sqlite3:
        'USER': 'root',
        'PASSWORD': '1234',
        #'HOST': '127.0.0.1',  # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
        'HOST': 'db',  # docker mysql.
        'PORT': '3306',  # Set to empty string for default.
        'OPTIONS': {
            'init_command': "Set sql_mode='STRICT_TRANS_TABLES'"
        }
    }
}
|
|
f60f31c73deef7768af5eb45046a8848f2dc40c4
|
Create draw_neural_net.py
|
draw/draw_neural_net.py
|
draw/draw_neural_net.py
|
Python
| 0.000023 |
@@ -0,0 +1,1962 @@
import matplotlib.pyplot as plt

def draw_neural_net(ax, left, right, bottom, top, layer_sizes):
    '''
    Draw a neural network cartoon using matplotlib.

    :usage:
        >>> fig = plt.figure(figsize=(12, 12))
        >>> draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])

    :parameters:
        - ax : matplotlib.axes.AxesSubplot
            The axes on which to plot the cartoon (get e.g. by plt.gca())
        - left : float
            The center of the leftmost node(s) will be placed here
        - right : float
            The center of the rightmost node(s) will be placed here
        - bottom : float
            The center of the bottommost node(s) will be placed here
        - top : float
            The center of the topmost node(s) will be placed here
        - layer_sizes : list of int
            List of layer sizes, including input and output dimensionality
    '''
    n_layers = len(layer_sizes)
    v_spacing = (top - bottom)/float(max(layer_sizes))
    h_spacing = (right - left)/float(len(layer_sizes) - 1)
    # Nodes
    for n, layer_size in enumerate(layer_sizes):
        layer_top = v_spacing*(layer_size - 1)/2. + (top + bottom)/2.
        for m in xrange(layer_size):
            circle = plt.Circle((n*h_spacing + left, layer_top - m*v_spacing), v_spacing/4.,
                                color='w', ec='k', zorder=4)
            ax.add_artist(circle)
    # Edges
    for n, (layer_size_a, layer_size_b) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:])):
        layer_top_a = v_spacing*(layer_size_a - 1)/2. + (top + bottom)/2.
        layer_top_b = v_spacing*(layer_size_b - 1)/2. + (top + bottom)/2.
        for m in xrange(layer_size_a):
            for o in xrange(layer_size_b):
                line = plt.Line2D([n*h_spacing + left, (n + 1)*h_spacing + left],
                                  [layer_top_a - m*v_spacing, layer_top_b - o*v_spacing], c='k')
                ax.add_artist(line)
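The docstring's usage example, expanded into a runnable sketch (output filename assumed):

fig = plt.figure(figsize=(12, 12))
draw_neural_net(fig.gca(), .1, .9, .1, .9, [4, 7, 2])
fig.savefig('nn.png')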
|
|
041b55f3a9ded360146f6e2dda74a6b20b3e6f7e
|
Add scrape_results
|
scrape_results.py
|
scrape_results.py
|
Python
| 0 |
@@ -0,0 +1,766 @@
from selenium import webdriver
from time import sleep
from bs4 import BeautifulSoup

driver = webdriver.Firefox()
driver.get("http://www.nitt.edu/prm/ShowResult.htm")

driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('TextBox1').value=110113006;}());")


driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('Button1').click();}());")

sleep(1)

driver.get("javascript:(function(){document.getElementsByName('main')[0].contentWindow.document.getElementById('Dt1').selectedIndex = 1;document.getElementsByName('main')[0].contentWindow.document.getElementById('Dt1').onchange();}());")

print "I have got the page to a specific student's result!!"
|
|
dc52b5914c4d0024458eefeb3b3576aa58692345
|
Remove print
|
organizations/decorators.py
|
organizations/decorators.py
|
# encoding: utf-8
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect, HttpResponse
from seaserv import get_user_current_org
def org_staff_required(func):
"""
Decorator for views that checks the user is org staff.
"""
def _decorated(request, *args, **kwargs):
user = request.user.username
url_prefix = kwargs.get('url_prefix', '')
org = get_user_current_org(user, url_prefix)
print url_prefix
print org._dict
if org and org.is_staff:
return func(request, *args, **kwargs)
return HttpResponseRedirect(reverse('myhome'))
return _decorated
|
Python
| 0.000016 |
@@ -453,57 +453,8 @@
ix)%0A
- print url_prefix%0A print org._dict%0A
|
55bf42057bcd9e14d964b2064f9322c164ba91ff
|
Test request construction (#91)
|
test/test_requests.py
|
test/test_requests.py
|
Python
| 0 |
@@ -0,0 +1,2340 @@
import unittest

import requests
import requests_mock

import tableauserverclient as TSC


class RequestTests(unittest.TestCase):
    def setUp(self):
        self.server = TSC.Server('http://test')

        # Fake sign in
        self.server._site_id = 'dad65087-b08b-4603-af4e-2887b8aafc67'
        self.server._auth_token = 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM'

        self.baseurl = self.server.workbooks.baseurl

    def test_make_get_request(self):
        with requests_mock.mock() as m:
            m.get(requests_mock.ANY)
            url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks"
            opts = TSC.RequestOptions(pagesize=13, pagenumber=13)
            resp = self.server.workbooks._make_request(requests.get,
                                                       url,
                                                       content=None,
                                                       request_object=opts,
                                                       auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM',
                                                       content_type='text/xml')

            self.assertEquals(resp.request.query, 'pagenumber=13&pagesize=13')
            self.assertEquals(resp.request.headers['x-tableau-auth'], 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM')
            self.assertEquals(resp.request.headers['content-type'], 'text/xml')

    def test_make_post_request(self):
        with requests_mock.mock() as m:
            m.post(requests_mock.ANY)
            url = "http://test/api/2.3/sites/dad65087-b08b-4603-af4e-2887b8aafc67/workbooks"
            resp = self.server.workbooks._make_request(requests.post,
                                                       url,
                                                       content=b'1337',
                                                       request_object=None,
                                                       auth_token='j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM',
                                                       content_type='multipart/mixed')
            self.assertEquals(resp.request.headers['x-tableau-auth'], 'j80k54ll2lfMZ0tv97mlPvvSCRyD0DOM')
            self.assertEquals(resp.request.headers['content-type'], 'multipart/mixed')
            self.assertEquals(resp.request.body, b'1337')
|
|
d149f9d64c55ea38c7be20a1c759dc802a16a793
|
Update doc string (Fix #491)
|
homeassistant/components/notify/smtp.py
|
homeassistant/components/notify/smtp.py
|
"""
homeassistant.components.notify.mail
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Mail (SMTP) notification service.
Configuration:
To use the Mail notifier you will need to add something like the following
to your configuration.yaml file.
notify:
platform: mail
server: MAIL_SERVER
port: YOUR_SMTP_PORT
sender: SENDER_EMAIL_ADDRESS
starttls: 1 or 0
username: YOUR_SMTP_USERNAME
password: YOUR_SMTP_PASSWORD
recipient: YOUR_RECIPIENT
Variables:
server
*Required
SMTP server which is used to send the notifications. For Google Mail, eg.
smtp.gmail.com. Keep in mind that Google has some extra layers of protection
which need special attention (Hint: 'Less secure apps').
port
*Required
The port that the SMTP server is using, eg. 587 for Google Mail and STARTTLS
or 465/993 depending on your SMTP servers.
sender
*Required
E-Mail address of the sender.
starttls
*Optional
Enables STARTTLS, eg. 1 or 0.
username
*Required
Username for the SMTP account.
password
*Required
Password for the SMTP server that belongs to the given username. If the
password contains a colon it needs to be wrapped in apostrophes.
recipient
*Required
Recipient of the notification.
"""
import logging
import smtplib
from email.mime.text import MIMEText
from homeassistant.helpers import validate_config
from homeassistant.components.notify import (
DOMAIN, ATTR_TITLE, BaseNotificationService)
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config):
""" Get the mail notification service. """
if not validate_config(config,
{DOMAIN: ['server',
'port',
'sender',
'username',
'password',
'recipient']},
_LOGGER):
return None
smtp_server = config[DOMAIN]['server']
port = int(config[DOMAIN]['port'])
username = config[DOMAIN]['username']
password = config[DOMAIN]['password']
server = None
try:
server = smtplib.SMTP(smtp_server, port)
server.ehlo()
if int(config[DOMAIN]['starttls']) == 1:
server.starttls()
server.ehlo()
try:
server.login(username, password)
except (smtplib.SMTPException, smtplib.SMTPSenderRefused) as error:
_LOGGER.exception(error,
"Please check your settings.")
return None
except smtplib.socket.gaierror:
_LOGGER.exception(
"SMTP server not found. "
"Please check the IP address or hostname of your SMTP server.")
return None
except smtplib.SMTPAuthenticationError:
_LOGGER.exception(
"Login not possible. "
"Please check your setting and/or your credentials.")
return None
if server:
server.quit()
return MailNotificationService(
config[DOMAIN]['server'],
config[DOMAIN]['port'],
config[DOMAIN]['sender'],
config[DOMAIN]['starttls'],
config[DOMAIN]['username'],
config[DOMAIN]['password'],
config[DOMAIN]['recipient']
)
# pylint: disable=too-few-public-methods, too-many-instance-attributes
class MailNotificationService(BaseNotificationService):
""" Implements notification service for E-Mail messages. """
# pylint: disable=too-many-arguments
def __init__(self, server, port, sender, starttls, username,
password, recipient):
self._server = server
self._port = port
self._sender = sender
self.starttls = int(starttls)
self.username = username
self.password = password
self.recipient = recipient
self.tries = 2
self.mail = None
self.connect()
def connect(self):
""" Connect/Authenticate to SMTP Server """
self.mail = smtplib.SMTP(self._server, self._port)
self.mail.ehlo_or_helo_if_needed()
if self.starttls == 1:
self.mail.starttls()
self.mail.ehlo()
self.mail.login(self.username, self.password)
def send_message(self, message="", **kwargs):
""" Send a message to a user. """
subject = kwargs.get(ATTR_TITLE)
msg = MIMEText(message)
msg['Subject'] = subject
msg['To'] = self.recipient
msg['From'] = self._sender
msg['X-Mailer'] = 'HomeAssistant'
for _ in range(self.tries):
try:
self.mail.sendmail(self._sender, self.recipient,
msg.as_string())
break
except smtplib.SMTPException:
_LOGGER.warning('SMTPException sending mail: '
'retrying connection')
self.connect()
|
Python
| 0 |
@@ -29,20 +29,20 @@
.notify.
-mail
+smtp
%0A~~~~~~~
@@ -133,20 +133,20 @@
use the
-Mail
+smtp
notifie
@@ -251,20 +251,20 @@
atform:
-mail
+smtp
%0A serve
|
35f98c14a74e207c616fcb57538bb176842c0d1e
|
Add procfile and wsgi entrypoint
|
nhs/wsgi.py
|
nhs/wsgi.py
|
Python
| 0 |
@@ -0,0 +1,1142 @@
"""
WSGI config for Nhs Prescriptions project.

This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.

Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.

"""
import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "nhs.settings")

# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()

# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
|
|
ebd41d7f264de92be19347042749ef48d5820b7d
|
add inner product demo
|
study/language_core_and_lib/function/functional_example.py
|
study/language_core_and_lib/function/functional_example.py
|
Python
| 0 |
@@ -0,0 +1,303 @@
def demo_inner_product():
    vec0 = range(10)
    vec1 = [i ** 2 for i in range(10)]
    print 'inner product:', reduce(lambda l, r: l + r, map(lambda ele: ele[0] * ele[1], zip(vec0, vec1)), 0)
    print 'verify:', sum([i ** 3 for i in range(10)])


if __name__ == '__main__':
    demo_inner_product()
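A Python 3 rendering of the same inner product, since reduce moved to functools there (editor's sketch):

from functools import reduce

vec0 = range(10)
vec1 = [i ** 2 for i in range(10)]
print('inner product:', reduce(lambda l, r: l + r,
                               (x * y for x, y in zip(vec0, vec1)), 0))
print('verify:', sum(x * y for x, y in zip(vec0, vec1)))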
|
|
6d51e3eae867e65c35772a647a40535fc088bb5c
|
version 0.41
|
client/version.py
|
client/version.py
|
ELECTRUM_VERSION = "0.40b"
SEED_VERSION = 4 # bump this everytime the seed generation is modified
|
Python
| 0.000001 |
@@ -20,10 +20,9 @@
%220.4
-0b
+1
%22%0ASE
|
7bd3d26427c08cf38f2f7dedbf075e1335447f70
|
add config for database
|
config/database.py
|
config/database.py
|
Python
| 0.000001 |
@@ -0,0 +1,75 @@
mongorc = {
    'host': '127.0.0.1',
    'port': 27017,
    'db': 'demo'
}
|
|
f3325695a78f528af6f3c2adb6024dc71405af8f
|
Create kaynaksız_sil.py
|
kaynaksız_sil.py
|
kaynaksız_sil.py
|
Python
| 0.000032 |
@@ -0,0 +1,1606 @@
# -*- coding: utf-8 -*-
# !/usr/bin/python

from bs4 import BeautifulSoup
import requests
import mavri
import re
import random


xx = mavri.login('tr.wikipedia', 'Mavrikant Bot')

wiki = 'tr.wikipedia'
template = 'Şablon:Kaynaksız'

ticontinue = ''
while ticontinue != 'DONE':
    allpages = requests.get('https://' + wiki + '.org/w/api.php?action=query&utf8&format=json&tiprop=title&titles=' + template + '&prop=transcludedin&tilimit=500&ticontinue=' + str(ticontinue))
    try:
        ticontinue = allpages.json()['continue']['ticontinue']
    except:
        ticontinue = 'DONE'

    for page in allpages.json()['query']['pages'].itervalues().next()['transcludedin']:

        title = page['title']
        #print title

        content = mavri.content_of_page(wiki, title)
        kaynak_sayisi = len(re.findall(ur'<\s?ref\s?\>', content))
        print kaynak_sayisi
        if (kaynak_sayisi > 0):
            print title
            content = mavri.content_of_section(wiki, title, 0, xx)
            content = re.sub(ur'\{\{\s?[Kk]aynaksız[^\}]*\}\}\s?\n?', '', content)

            params3 = '?format=json&action=tokens'
            r3 = requests.get('https://' + wiki + '.org/w/api.php' + params3, cookies=xx.cookies)
            edit_token = r3.json()['tokens']['edittoken']
            edit_cookie = xx.cookies.copy()
            edit_cookie.update(r3.cookies)


            payload = {'action': 'edit', 'assert': 'user', 'format': 'json', 'utf8': '', 'section': str(0), 'text': content, 'summary': '-Kaynaksız şablonu, ' + str(kaynak_sayisi) + ' adet kaynak var', 'title': title, 'token': edit_token, 'bot': ''}
            requests.post('https://' + wiki + '.org/w/api.php', data=payload, cookies=edit_cookie)

exit(0)
|
|
118e47c2bc307d8de447e9d37973feca44763ab5
|
Create __init__.py
|
packs/astral/actions/lib/__init__.py
|
packs/astral/actions/lib/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1,35 @@
from .BaseAction import BaseAction
|
|
f5c56152771fbafc5ac9161ccd453a240bfca5cc
|
Add get_history example.
|
examples/get_history.py
|
examples/get_history.py
|
Python
| 0 |
@@ -0,0 +1,780 @@
import sys
sys.path.append('../')

import zabbix
from datetime import datetime
from datetime import timedelta
from calendar import timegm

# read config file
config = {}
execfile("config.py", config)

# new api instance
server = config["server"]
api = zabbix.Api(server)

# log in
username = config["user"]
password = config["password"]
api.login(username, password)

# get history
# host id
http_host_id = config["http_host_id"]
# item id
http_processor_time = config["http_processor_time_id"]
# start time and end time
time_from = timegm((datetime.now() - timedelta(minutes = 100)).utctimetuple()) - 150000
time_till = timegm(datetime.now().utctimetuple()) - 150000
print api.get_history('float', http_host_id, http_processor_time, time_from, time_till)

# log out
api.logout()
|
|
26afdc032087693d274966a803a6bb3c77d17549
|
add request example
|
examples/request/req.py
|
examples/request/req.py
|
Python
| 0 |
@@ -0,0 +1,605 @@
from app import Application

def dump(request):
    text = """
Method: {0.method}
Path: {0.path}
Version: {0.version}
Headers: {0.headers}
Match: {0.match_dict}
Body: {0.body}
QS: {0.query_string}
query: {0.query}
mime_type: {0.mime_type}
encoding: {0.encoding}
form: {0.form}
keep_alive: {0.keep_alive}
route: {0.route}
hostname: {0.hostname}
port: {0.port}
remote_addr: {0.remote_addr}
""".strip().format(request)

    return request.Response(text=text)


if __name__ == '__main__':
    app = Application()
    app.router.add_route('/', dump)
    app.router.add_route('/{a}/{b}', dump)

    app.serve()
|
|
b84a2667b5071ede3eb983364195c3a2d3c97543
|
Create MQTTstage.py
|
MQTTstage.py
|
MQTTstage.py
|
Python
| 0.000006 |
@@ -0,0 +1,62 @@
#!/usr/bin/python


#Check if the
def CheckDirectories():
|
|
0db094aba5095b63a8f9bfb066afb0048617f87e
|
add update_GeneAtlas_images.py
|
scheduled_bots/scripts/update_GeneAtlas_images.py
|
scheduled_bots/scripts/update_GeneAtlas_images.py
|
Python
| 0.000001 |
@@ -0,0 +1,1932 @@
"""
One off script to change GeneAtlas images to point to full-sized versions
https://github.com/SuLab/GeneWikiCentral/issues/1

As described at https://www.wikidata.org/wiki/Property_talk:P692#How_about_using_full_size_image_instead_of_small_thumbnail.3F
update all uses of the Gene Atlas Image property to use the full-sized version of the Gene Atlas image
(e.g., https://www.wikidata.org/wiki/File:PBB_GE_ACTN3_206891_at_fs.png) instead of the thumbnail
(e.g., https://www.wikidata.org/wiki/File:PBB_GE_ACTN3_206891_at_tn.png)

SELECT ?item ?image
WHERE
{
  ?item wdt:P351 ?entrez .
  ?item wdt:P703 wd:Q15978631 .
  ?item wdt:P692 ?image
} limit 1000
"""
from collections import defaultdict

from scheduled_bots.local import WDPASS, WDUSER
from tqdm import tqdm
from wikidataintegrator import wdi_core, wdi_login, wdi_helpers
import urllib.request

login = wdi_login.WDLogin(WDUSER, WDPASS)

image_qid = wdi_helpers.id_mapper("P692", [("P703", "Q15978631")])
qid_images = defaultdict(list)
for image, qid in image_qid.items():
    qid_images[qid].append(image)
qid_images = dict(qid_images)

for qid, images in tqdm(qid_images.items()):
    images = [urllib.request.unquote(image.replace("http://commons.wikimedia.org/wiki/Special:FilePath/", "")) for image in images]
    images_proc = [image for image in images if image.startswith("PBB GE") and image.endswith("at tn.png")]
    if not images_proc:
        continue
    images_keep = [image for image in images if image.startswith("PBB GE") and image.endswith("at fs.png")]

    item = wdi_core.WDItemEngine(wd_item_id=qid)

    s = []
    for image in images_proc:
        s.append(wdi_core.WDCommonsMedia(image.replace(" at tn.png", " at fs.png"), "P692"))
    for image in images_keep:
        s.append(wdi_core.WDCommonsMedia(image, "P692"))
    item.update(data=s)
    wdi_helpers.try_write(item, '', '', login, edit_summary="replace thumbnail gene atlas image with fs")
|
|
427a95f0c56facc138448cde7e7b9da1bcdc8ea4
|
Add super basic Hypothesis example
|
add_example.py
|
add_example.py
|
Python
| 0.001464 |
@@ -0,0 +1,399 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Unit Tests

def test_add_zero():
    assert 0 + 1 == 1 + 0

def test_add_single_digits():
    assert 1 + 2 == 2 + 1

def test_add_double_digits():
    assert 10 + 12 == 12 + 10


# Property-based Test

from hypothesis import given
import hypothesis.strategies as st

@given(st.integers(), st.integers())
def test_add(x, y):
    assert x + y == y + x
|
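A note on the property-based test above: pytest collects test_add like the example-based tests, but Hypothesis feeds it many generated integer pairs and shrinks any failing case to a minimal one. A deliberately false property makes that behaviour visible; this sketch is illustrative only and not part of the original commit:

from hypothesis import given
import hypothesis.strategies as st

@given(st.integers(), st.integers())
def test_sub_is_commutative(x, y):
    # Deliberately false for any x != y; Hypothesis will typically
    # shrink the counterexample to something small such as x=0, y=1.
    assert x - y == y - x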
|
3a4cb29e91008225c057feb3811e93b59f99d941
|
use flask-mail
|
application.py
|
application.py
|
Python
| 0.000001 |
@@ -0,0 +1,580 @@
from flask import Flask
from flask.ext.mail import Mail, Message

mail = Mail()
app = Flask(__name__)
app.config.update(
    MAIL_SERVER='smtp.gmail.com',
    MAIL_PORT='465',
    MAIL_USE_SSL=True,
    MAIL_USERNAME='[email protected]',
    MAIL_PASSWORD='H3rpD3rpL0l')
mail.init_app(app)

@app.route("/")
def index():
    msg = Message("Hello", sender=("flask", "[email protected]"), recipients=["[email protected]"])
    msg.body = "testing"
    msg.html = "<b>testing</b>"
    mail.send(msg)
    return msg.html

if __name__ == '__main__':
    app.run()
|
|
21a504dce25a1b22bda27cd74a443af98b24ad14
|
Add pseudo filter combining pypandoc and panflute
|
filters/extract_urls.py
|
filters/extract_urls.py
|
Python
| 0 |
@@ -0,0 +1,610 @@
import io

import pypandoc
import panflute


def prepare(doc):
    doc.images = []
    doc.links = []


def action(elem, doc):
    if isinstance(elem, panflute.Image):
        doc.images.append(elem)
    elif isinstance(elem, panflute.Link):
        doc.links.append(elem)


if __name__ == '__main__':
    data = pypandoc.convert_file('example.md', 'json')
    f = io.StringIO(data)
    doc = panflute.load(f)
    doc = panflute.run_filter(action, prepare=prepare, doc=doc)

    print("\nImages:")
    for image in doc.images:
        print(image.url)

    print("\nLinks:")
    for link in doc.links:
        print(link.url)
|
|
b811bb9e9469a23921f841d4bfe3b52928a83e14
|
Create b.py
|
at/abc126/b.py
|
at/abc126/b.py
|
Python
| 0.000018 |
@@ -0,0 +1,310 @@
read = input
s = read()
a, b = map(int , [s[:2], s[2:]])
YYMM = False
MMYY = False
if 1 <= b and b <= 12:
    YYMM = True
if 1 <= a and a <= 12:
    MMYY = True
if YYMM and MMYY :
    print('AMBIGUOUS')
elif YYMM and not MMYY:
    print('YYMM')
elif not YYMM and MMYY:
    print('MMYY')
else :
    print('NA')
|
|
32c025a217f7771be94976fda6ede2d80855b4b6
|
Move things to new units module
|
pyatmlab/units.py
|
pyatmlab/units.py
|
Python
| 0.000001 |
@@ -0,0 +1,622 @@
"""Various units-related things
"""

from pint import (UnitRegistry, Context)
ureg = UnitRegistry()
ureg.define("micro- = 1e-6 = µ-")

# aid conversion between different radiance units
sp2 = Context("radiance")
sp2.add_transformation(
    "[length] * [mass] / [time] ** 3",
    "[mass] / [time] ** 2",
    lambda ureg, x: x / ureg.speed_of_light)
sp2.add_transformation(
    "[mass] / [time] ** 2",
    "[length] * [mass] / [time] ** 3",
    lambda ureg, x: x * ureg.speed_of_light)
ureg.add_context(sp2)

radiance_units = {
    "si": ureg.W/(ureg.m**2*ureg.sr*ureg.Hz),
    "ir": ureg.mW/(ureg.m**2*ureg.sr*(1/ureg.cm))}
|
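For context, the "radiance" pint context defined above lets quantities move between the per-frequency and per-wavenumber radiance units by inserting the speed-of-light factor. A minimal usage sketch, assuming the module is importable as pyatmlab.units:

from pyatmlab.units import ureg, radiance_units

L_si = 1.0 * radiance_units["si"]            # W / (m**2 * sr * Hz)
# the "radiance" context supplies the speed-of-light transformation
L_ir = L_si.to(radiance_units["ir"], "radiance")
print(L_ir)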
|
c473d392e15912b68f09e20a329758be7ffe7930
|
Fix 'yotta version' in Windows
|
yotta/lib/vcs.py
|
yotta/lib/vcs.py
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import os
import subprocess
import tempfile
import logging
import hgapi
import errno
# fsutils, , misc filesystem utils, internal
import fsutils
git_logger = logging.getLogger('git')
hg_logger = logging.getLogger('hg')
class VCSError(Exception):
def __init__(self, message, returncode=None):
super(VCSError, self).__init__(message)
self.returncode = returncode
class VCS(object):
@classmethod
def cloneToTemporaryDir(cls, remote):
raise NotImplementedError()
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
raise NotImplementedError()
def isClean(self):
raise NotImplementedError()
def commit(self, message, tag=None):
raise NotImplementedError()
def isClean(self):
raise NotImplementedError()
def tags(self):
raise NotImplementedError()
def markForCommit(self, path):
pass
def remove(self):
raise NotImplementedError()
def __nonzero__(self):
raise NotImplementedError()
# python 3 truthiness
def __bool__(self):
return self.__nonzero__()
class Git(VCS):
def __init__(self, path):
self.worktree = path
self.gitdir = os.path.join(path, '.git')
@classmethod
def cloneToTemporaryDir(cls, remote):
return cls.cloneToDirectory(remote, tempfile.mkdtemp())
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
commands = [
['git', 'clone', remote, directory]
]
cls._execCommands(commands)
r = Git(directory)
if tag is not None:
r.updateToTag(tag)
return r
def fetchAllBranches(self):
remote_branches = []
local_branches = []
# list remote branches
out, err = self._execCommands([self._gitCmd('branch', '-r')])
for line in out.split(b'\n'):
branch_info = line.split(b' -> ')
# skip HEAD:
if len(branch_info) > 1:
continue
remote_branch = branch_info[0].strip()
branch = b'/'.join(remote_branch.split(b'/')[1:])
remote_branches.append((remote_branch, branch))
# list already-existing local branches
out, err = self._execCommands([self._gitCmd('branch')])
for line in out.split(b'\n'):
local_branches.append(line.strip(b' *'))
for remote, branchname in remote_branches:
# don't try to replace existing local branches
if branchname in local_branches:
continue
try:
out, err = self._execCommands([
self._gitCmd('checkout', '-b', branchname, remote)
])
except VCSError as e:
git_logger.error('failed to fetch remote branch %s %s' % (remote, branchname))
raise
def remove(self):
fsutils.rmRf(self.worktree)
def workingDirectory(self):
return self.worktree
def _gitCmd(self, *args):
return ['git','--work-tree=%s' % self.worktree,'--git-dir=%s'%self.gitdir] + list(args);
@classmethod
def _execCommands(cls, commands):
out, err = None, None
for cmd in commands:
try:
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
except OSError as e:
if e.errno == errno.ENOENT:
if cmd[0] == 'git':
raise VCSError(
'git is not installed, or not in your path. Please follow the installation instructions at http://docs.yottabuild.org/#installing'
)
else:
raise VCSError('%s is not installed' % (cmd[0]))
else:
raise VCSError('command %s failed' % (cmd))
out, err = child.communicate()
returncode = child.returncode
if returncode:
raise VCSError("command failed: %s:%s" % (cmd, err or out), returncode=returncode)
return out, err
def isClean(self):
commands = [
self._gitCmd('diff', '--quiet', '--exit-code'),
self._gitCmd('diff', '--cached', '--quiet', '--exit-code'),
]
try:
out, err = self._execCommands(commands)
except VCSError as e:
if e.returncode:
return False
else:
raise
return True
def markForCommit(self, relative_path):
commands = [
self._gitCmd('add', os.path.join(self.worktree, relative_path)),
]
self._execCommands(commands)
def updateToTag(self, tag):
commands = [
self._gitCmd('checkout', tag),
]
self._execCommands(commands)
def tags(self):
commands = [
self._gitCmd('tag', '-l')
]
out, err = self._execCommands(commands)
# I think utf-8 is the right encoding? commit messages are utf-8
# encoded, couldn't find any documentation on tag names.
return out.decode('utf-8').split(u'\n')
def branches(self):
commands = [
self._gitCmd('branch', '--list')
]
out, err = self._execCommands(commands)
return [x.lstrip(' *') for x in out.decode('utf-8').split('\n')]
def commit(self, message, tag=None):
commands = [
self._gitCmd('commit', '-m', message),
]
if tag:
commands.append(
self._gitCmd('tag', tag),
)
self._execCommands(commands)
def __nonzero__(self):
return True
# FIXME: hgapi will throw HgException when something goes wrong, it may be worth trying
# to catch that in some methods
class HG(VCS):
def __init__(self, path):
self.worktree = path
self.repo = hgapi.Repo(path)
@classmethod
def cloneToTemporaryDir(cls, remote):
return cls.cloneToDirectory(remote, tempfile.mkdtemp())
@classmethod
def cloneToDirectory(cls, remote, directory, tag=None):
# hg doesn't automatically create the directories needed by destination
try:
os.makedirs(directory)
except:
pass
hg_logger.debug('will clone %s into %s', remote, directory)
hgapi.Repo.hg_clone(remote, directory)
r = HG(directory)
if tag is not None:
r.updateToTag(tag)
return r
def remove(self):
fsutils.rmRf(self.worktree)
def workingDirectory(self):
return self.worktree
def isClean(self):
return not bool(self.repo.hg_status(empty=True))
def markForCommit(self, relative_path):
self.repo.hg_add(os.path.join(self.worktree, relative_path))
def updateToTag(self, tag):
self.repo.hg_update(tag)
def tags(self):
l = list(self.repo.hg_tags().keys())
l.remove('tip')
return l
def commit(self, message, tag=None):
self.repo.hg_commit(message)
if tag:
self.repo.hg_tag(tag)
def __nonzero__(self):
return True
def getVCS(path):
# crude heuristic, does the job...
if os.path.exists(os.path.join(path, '.git')):
return Git(path)
if os.path.isdir(os.path.join(path, '.hg')):
return HG(path)
return None
|
Python
| 0.000048 |
@@ -3291,16 +3291,35 @@
f.gitdir
+.replace('\\', '/')
%5D + list
|
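The single-fragment hunk above is the whole Windows fix: the --git-dir argument is assembled with os.path.join-style backslashes, which git's CLI handles badly, so the path is normalized to forward slashes first. The same idea in isolation, with a hypothetical path value:

def normalize_git_dir(gitdir):
    # git accepts forward slashes on every platform, including Windows,
    # whereas backslashes from os.path.join can break --git-dir parsing
    return gitdir.replace('\\', '/')

# e.g. 'C:\\projects\\pkg\\.git' -> 'C:/projects/pkg/.git'
args = ['git', '--git-dir=%s' % normalize_git_dir('C:\\projects\\pkg\\.git'), 'tag', '-l']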
b0652c0dff90d68a6a7cf84b536e7e539e344f74
|
Fix for bug 902175
|
quantum/db/api.py
|
quantum/db/api.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 Nicira Networks, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Somik Behera, Nicira Networks, Inc.
# @author: Brad Hall, Nicira Networks, Inc.
# @author: Dan Wendlandt, Nicira Networks, Inc.
import logging
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, exc
from quantum.common import exceptions as q_exc
from quantum.db import models
_ENGINE = None
_MAKER = None
BASE = models.BASE
LOG = logging.getLogger('quantum.db.api')
def configure_db(options):
"""
Establish the database, create an engine if needed, and
register the models.
:param options: Mapping of configuration options
"""
global _ENGINE
if not _ENGINE:
_ENGINE = create_engine(options['sql_connection'],
echo=False,
echo_pool=True,
pool_recycle=3600)
register_models()
def clear_db():
global _ENGINE
assert _ENGINE
for table in reversed(BASE.metadata.sorted_tables):
_ENGINE.execute(table.delete())
def get_session(autocommit=True, expire_on_commit=False):
"""Helper method to grab session"""
global _MAKER, _ENGINE
if not _MAKER:
assert _ENGINE
_MAKER = sessionmaker(bind=_ENGINE,
autocommit=autocommit,
expire_on_commit=expire_on_commit)
return _MAKER()
def register_models():
"""Register Models and create properties"""
global _ENGINE
assert _ENGINE
BASE.metadata.create_all(_ENGINE)
def unregister_models():
"""Unregister Models, useful clearing out data before testing"""
global _ENGINE
assert _ENGINE
BASE.metadata.drop_all(_ENGINE)
def network_create(tenant_id, name):
session = get_session()
with session.begin():
net = models.Network(tenant_id, name)
session.add(net)
session.flush()
return net
def network_list(tenant_id):
session = get_session()
return session.query(models.Network).\
filter_by(tenant_id=tenant_id).\
all()
def network_get(net_id):
session = get_session()
try:
return session.query(models.Network).\
filter_by(uuid=net_id).\
one()
except exc.NoResultFound, e:
raise q_exc.NetworkNotFound(net_id=net_id)
def network_update(net_id, tenant_id, **kwargs):
session = get_session()
net = network_get(net_id)
for key in kwargs.keys():
net[key] = kwargs[key]
session.merge(net)
session.flush()
return net
def network_destroy(net_id):
session = get_session()
try:
net = session.query(models.Network).\
filter_by(uuid=net_id).\
one()
ports = session.query(models.Port).\
filter_by(network_id=net_id).\
all()
for p in ports:
session.delete(p)
session.delete(net)
session.flush()
return net
except exc.NoResultFound:
raise q_exc.NetworkNotFound(net_id=net_id)
def port_create(net_id, state=None):
# confirm network exists
network_get(net_id)
session = get_session()
with session.begin():
port = models.Port(net_id)
port['state'] = state or 'DOWN'
session.add(port)
session.flush()
return port
def port_list(net_id):
# confirm network exists
network_get(net_id)
session = get_session()
return session.query(models.Port).\
filter_by(network_id=net_id).\
all()
def port_get(port_id, net_id):
# confirm network exists
network_get(net_id)
session = get_session()
try:
return session.query(models.Port).\
filter_by(uuid=port_id).\
filter_by(network_id=net_id).\
one()
except exc.NoResultFound:
raise q_exc.PortNotFound(net_id=net_id, port_id=port_id)
def port_update(port_id, net_id, **kwargs):
# confirm network exists
network_get(net_id)
port = port_get(port_id, net_id)
session = get_session()
for key in kwargs.keys():
if key == "state":
if kwargs[key] not in ('ACTIVE', 'DOWN'):
raise q_exc.StateInvalid(port_state=kwargs[key])
port[key] = kwargs[key]
session.merge(port)
session.flush()
return port
def port_set_attachment(port_id, net_id, new_interface_id):
# confirm network exists
network_get(net_id)
session = get_session()
port = port_get(port_id, net_id)
if new_interface_id != "":
# We are setting, not clearing, the attachment-id
if port['interface_id']:
raise q_exc.PortInUse(net_id=net_id, port_id=port_id,
att_id=port['interface_id'])
try:
port = session.query(models.Port).\
filter_by(interface_id=new_interface_id).\
one()
raise q_exc.AlreadyAttached(net_id=net_id,
port_id=port_id,
att_id=new_interface_id,
att_port_id=port['uuid'])
except exc.NoResultFound:
# this is what should happen
pass
port.interface_id = new_interface_id
session.merge(port)
session.flush()
return port
def port_unset_attachment(port_id, net_id):
# confirm network exists
network_get(net_id)
session = get_session()
port = port_get(port_id, net_id)
port.interface_id = None
session.merge(port)
session.flush()
def port_destroy(port_id, net_id):
# confirm network exists
network_get(net_id)
session = get_session()
try:
port = session.query(models.Port).\
filter_by(uuid=port_id).\
filter_by(network_id=net_id).\
one()
if port['interface_id']:
raise q_exc.PortInUse(net_id=net_id, port_id=port_id,
att_id=port['interface_id'])
session.delete(port)
session.flush()
return port
except exc.NoResultFound:
raise q_exc.PortNotFound(port_id=port_id)
|
Python
| 0.000001 |
@@ -4182,32 +4182,46 @@
(port_id, net_id
+, session=None
):
    # confirm
@@ -4252,32 +4252,56 @@
ork_get(net_id)
+    if not session:
    session = ge
@@ -4328,33 +4328,32 @@

    return
-
    session.query(mo
@@ -6134,24 +6134,33 @@
t_id, net_id
+, session
)
    port.i
@@ -6182,37 +6182,35 @@
one
    session.
-merge
+add
(port)
    sessi
|
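Read together, these character-level hunks make the database session reusable: port_get grows an optional session parameter, port_unset_attachment hands its own session in, and the modified port is re-attached with session.add instead of session.merge. Because the whitespace in the hunks is ambiguous in this rendering, the patched functions are reconstructed here only approximately:

def port_get(port_id, net_id, session=None):
    # confirm network exists
    network_get(net_id)
    if not session:
        session = get_session()
    try:
        return session.query(models.Port).\
            filter_by(uuid=port_id).\
            filter_by(network_id=net_id).\
            one()
    except exc.NoResultFound:
        raise q_exc.PortNotFound(net_id=net_id, port_id=port_id)


def port_unset_attachment(port_id, net_id):
    # confirm network exists
    network_get(net_id)
    session = get_session()
    port = port_get(port_id, net_id, session)
    port.interface_id = None
    session.add(port)
    session.flush()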
50494947bdf7fc8fce50cb5f589c84fd48db4b05
|
test perm using py.test #1150
|
login/tests/fixture.py
|
login/tests/fixture.py
|
Python
| 0 |
@@ -0,0 +1,1751 @@
# -*- encoding: utf-8 -*-
import pytest

from login.tests.factories import (
    TEST_PASSWORD,
    UserFactory,
)


class PermTest:

    def __init__(self, client):
        setup_users()
        self.client = client

    def anon(self, url):
        self.client.logout()
        response = self.client.get(url)
        assert 200 == response.status_code

    def staff(self, url):
        # check anon user cannot login
        self.client.logout()
        response = self.client.get(url)
        assert 302 == response.status_code
        assert 'accounts/login' in response['Location']
        # check web user cannot login
        assert self.client.login(username='web', password=TEST_PASSWORD)
        assert 302 == response.status_code
        assert 'accounts/login' in response['Location']
        # check staff user can login
        assert self.client.login(username='staff', password=TEST_PASSWORD)
        response = self.client.get(url)
        assert 200 == response.status_code


@pytest.fixture
def perm_check(client):
    """Check permissions on a URL.

    We use a clever trick to pass parameters to the fixture. For details:
    py.test: Pass a parameter to a fixture function
    http://stackoverflow.com/questions/18011902/py-test-pass-a-parameter-to-a-fixture-function

    """
    return PermTest(client)


def setup_users():
    """Using factories - set-up users for permissions test cases."""
    UserFactory(
        username='admin',
        email='[email protected]',
        is_staff=True,
        is_superuser=True
    )
    UserFactory(username='staff', email='[email protected]', is_staff=True)
    UserFactory(
        username='web', email='[email protected]',
        first_name='William', last_name='Webber'
    )
|
|
1a98ccfbff406509d9290e76bbdf8edbb862fc1d
|
Solve orderred dict
|
python/py-collections-ordereddict.py
|
python/py-collections-ordereddict.py
|
Python
| 0.999999 |
@@ -0,0 +1,477 @@
from collections import OrderedDict

d = OrderedDict()
number_of_items = int(input().strip())
for i in range(number_of_items):
    item, delimeter, price = input().strip().rpartition(" ")
    price = int(price)
    if (item in d):
        previous_total_purchased = d.get(item)
        next_total_purchased = previous_total_purchased + price
        d[item] = next_total_purchased
    else:
        d[item] = price


for item, price in d.items():
    print (f'{item} {price}')
|
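One detail worth noting in the solution above: rpartition(" ") splits on the last space, so multi-word item names survive intact while the trailing price is isolated. For instance:

item, _, price = 'Candy Floss 5'.rpartition(' ')
# item == 'Candy Floss', price == '5'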
|
fa4155114304d1ebc9e3bb04f546ce7d4708c381
|
Add simple pipeline
|
pykit/pipeline.py
|
pykit/pipeline.py
|
Python
| 0.000001 |
@@ -0,0 +1,676 @@
# -*- coding: utf-8 -*-

"""
Pipeline that determines phase ordering and execution.
"""

from __future__ import print_function, division, absolute_import
import types

cpy = {
    'lower_convert': lower_convert,
}

lower = {

}

# ______________________________________________________________________
# Execute pipeline

def apply_transform(transform, func, env):
    if isinstance(transform, types.ModuleType):
        return transform.run(func, env)
    else:
        return transform(func, env)

def run(transforms, order, func, env):
    for transform in order:
        if transform in transforms:
            func, env = apply_transform(transforms[transform], func, env)
|
|
8f81b90e81afca03dd591426b1197dbab74e1adf
|
Add an import/export handler for Audit->Auditors
|
src/ggrc_basic_permissions/converters/handlers.py
|
src/ggrc_basic_permissions/converters/handlers.py
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: [email protected]
# Maintained By: [email protected]
from sqlalchemy import and_
from ggrc import db
from ggrc.converters import errors
from ggrc.converters.handlers import UserColumnHandler
from ggrc.login import get_current_user
from ggrc.models import Person
from ggrc_basic_permissions.models import Role
from ggrc_basic_permissions.models import UserRole
class ObjectRoleColumnHandler(UserColumnHandler):
role = -1
owner_columns = ("program_owner")
def parse_item(self):
users = self.get_users_list()
if not users and self.key in self.owner_columns:
self.add_warning(errors.OWNER_MISSING)
users.append(get_current_user())
return list(users)
def set_obj_attr(self):
pass
def get_value(self):
user_role_ids = db.session.query(UserRole.person_id).filter_by(
role=self.role,
context_id=self.row_converter.obj.context_id)
users = Person.query.filter(Person.id.in_(user_role_ids))
emails = [user.email for user in users]
return "\n".join(emails)
def remove_current_roles(self):
UserRole.query.filter_by(
role=self.role,
context_id=self.row_converter.obj.context_id).delete()
def insert_object(self):
if self.dry_run or not self.value:
return
self.remove_current_roles()
for owner in self.value:
user_role = UserRole(
role=self.role,
context=self.row_converter.obj.context,
person=owner
)
db.session.add(user_role)
self.dry_run = True
class ProgramOwnerColumnHandler(ObjectRoleColumnHandler):
def __init__(self, row_converter, key, **options):
self.role = Role.query.filter_by(name="ProgramOwner").one()
super(self.__class__, self).__init__(row_converter, key, **options)
class ProgramEditorColumnHandler(ObjectRoleColumnHandler):
def __init__(self, row_converter, key, **options):
self.role = Role.query.filter_by(name="ProgramEditor").one()
super(self.__class__, self).__init__(row_converter, key, **options)
class ProgramReaderColumnHandler(ObjectRoleColumnHandler):
def __init__(self, row_converter, key, **options):
self.role = Role.query.filter_by(name="ProgramReader").one()
super(self.__class__, self).__init__(row_converter, key, **options)
class WorkflowOwnerColumnHandler(ObjectRoleColumnHandler):
def __init__(self, row_converter, key, **options):
self.role = Role.query.filter_by(name="WorkflowOwner").one()
super(self.__class__, self).__init__(row_converter, key, **options)
class WorkflowMemberColumnHandler(ObjectRoleColumnHandler):
def __init__(self, row_converter, key, **options):
self.role = Role.query.filter_by(name="WorkflowMember").one()
super(self.__class__, self).__init__(row_converter, key, **options)
class UserRoleColumnHandler(UserColumnHandler):
_role_map = {
"admin": "ggrc admin"
}
_allowed_roles = [
"Reader",
"Editor",
"gGRC Admin",
]
def parse_item(self):
value = self.raw_value.lower()
name = self._role_map.get(value, value)
return Role.query.filter_by(name=name).first()
def set_obj_attr(self):
pass
def get_value(self):
return self.row_converter.obj.system_wide_role
def remove_current_roles(self):
allowed_role_ids = db.session.query(Role.id).filter(
Role.name.in_(self._allowed_roles))
UserRole.query.filter(and_(
UserRole.role_id.in_(allowed_role_ids),
UserRole.person_id == self.row_converter.obj.id)
).delete(synchronize_session="fetch")
def insert_object(self):
if self.dry_run or not self.value:
return
self.remove_current_roles()
user_role = UserRole(
role=self.value,
person=self.row_converter.obj
)
db.session.add(user_role)
self.dry_run = True
COLUMN_HANDLERS = {
"program_editor": ProgramEditorColumnHandler,
"program_owner": ProgramOwnerColumnHandler,
"program_reader": ProgramReaderColumnHandler,
"user_role": UserRoleColumnHandler,
"workflow_member": WorkflowMemberColumnHandler,
"workflow_owner": WorkflowOwnerColumnHandler,
}
|
Python
| 0 |
@@ -2945,24 +2945,269 @@
*options)


+class AuditAuditorColumnHandler(ObjectRoleColumnHandler):

    def __init__(self, row_converter, key, **options):
        self.role = Role.query.filter_by(name="Auditor").one()
        super(self.__class__, self).__init__(row_converter, key, **options)


class UserRo
@@ -4412,32 +4412,84 @@
eColumnHandler,
+    "user_role:Auditor": AuditAuditorColumnHandler,
"workflow_me
|
141005c72b1686d73cdc581e9ee8313529e11e4c
|
Add health check script.
|
tools/health-check.py
|
tools/health-check.py
|
Python
| 0 |
@@ -0,0 +1,2470 @@
#!/usr/bin/python

# Health check script that examines the /status/ URI and sends mail on any
# condition other than 200/OK.
# Configuration is via environment variables:
#   * POWERMON_STATUS - absolute URL to /status/ URI
#   * POWERMON_SMTPHOST - SMTP host name used to send mail
#   * POWERMON_MAILTO - email address where problem reports are sent

from email.mime.text import MIMEText
from httplib import HTTPConnection, HTTPSConnection
from os import environ
from os.path import basename
from smtplib import SMTP
from urlparse import urlparse
import sys


def getenvars(*vars):
    """Returns the values of one or more environment variables."""
    values = []
    for var in vars:
        if not var in environ:
            die('%s environment variable not defined' % var)
        values.append(environ[var])
    return tuple(values)


def die_err(e, message):
    """Displays exception details and a message then exits program."""
    print message
    print e
    sys.exit(1)


def die(message):
    """Displays a message then exits program."""
    print message
    sys.exit(1)


def http_get(url):
    """Returns the tuple (status, response body) for a GET request to the given URL."""
    conn = None
    headers = {
        'Accept': 'text/plain, text/html, text/xml',
        'Content-Length': 0,
        'User-Agent': 'Python/%s.%s.%s' % sys.version_info[0:3]
    }
    result = urlparse(url)
    try :
        if result.scheme == 'https':
            conn = HTTPSConnection(result.netloc)
        else:
            conn = HTTPConnection(result.netloc)
        conn.request('GET', url, "", headers)
        response = conn.getresponse()
        return (response.status, str(response.read()))
    except Exception, e:
        die_err(e, 'HTTP GET failed:')
    finally:
        if conn: conn.close()


def send_mail(mfrom, mto, body, smtp_host):
    """Sends a health check failure notice to the designated recipient."""
    msg = MIMEText(body)
    msg['Subject'] = 'Powermon Health Check Failure'
    msg['From'] = mfrom
    msg['To'] = mto
    s = SMTP(smtp_host)
    try:
        s.sendmail(mfrom, [mto], msg.as_string())
    finally:
        s.quit


(status_url, mailto, smtp_host) = getenvars(
    'POWERMON_STATUS', 'POWERMON_MAILTO', 'POWERMON_SMTPHOST')

hostname = 'localhost'
if 'HOSTNAME' in environ:
    hostname = environ['HOSTNAME']
mailfrom = '%s@%s' % (environ['USER'], hostname)

print 'Checking', status_url
(status, body) = http_get(status_url)
print body
if status > 200:
    print 'Sending failure notice to', mailto
    send_mail(mailfrom, mailto, body, smtp_host)
|
|
210eba35fc4473e626fc58a8e4ea3cdbb6abdc28
|
add undocumented function to display new messages.
|
rtv/docs.py
|
rtv/docs.py
|
from .__version__ import __version__
__all__ = ['AGENT', 'SUMMARY', 'AUTH', 'CONTROLS', 'HELP', 'COMMENT_FILE',
'SUBMISSION_FILE', 'COMMENT_EDIT_FILE']
AGENT = """\
desktop:https://github.com/michael-lazar/rtv:{} (by /u/civilization_phaze_3)\
""".format(__version__)
SUMMARY = """
Reddit Terminal Viewer is a lightweight browser for www.reddit.com built into a
terminal window.
"""
AUTH = """\
Authenticating is required to vote and leave comments. If only a username is
given, the program will display a secure prompt to enter a password.
"""
CONTROLS = """
Controls
--------
RTV currently supports browsing both subreddits and individual submissions.
In each mode the controls are slightly different. In subreddit mode you can
browse through the top submissions on either the front page or a specific
subreddit. In submission mode you can view the self text for a submission and
browse comments.
"""
HELP = """
Basic Commands
`j/k` or `UP/DOWN` : Move the cursor up/down
`m/n` or `PgUp/PgDn`: Jump to the previous/next page
`o` or `ENTER` : Open the selected item as a webpage
`r` or `F5` : Refresh page content
`u` : Log in or switch accounts
`?` : Show the help screen
`q` : Quit
Authenticated Commands
`a/z` : Upvote/downvote
`c` : Compose a new post or comment
`e` : Edit an existing post or comment
`d` : Delete an existing post or comment
`s` : Open/close subscribed subreddits list
Subreddit Mode
`l` or `RIGHT` : Enter the selected submission
`/` : Open a prompt to switch subreddits
`f` : Open a prompt to search the current subreddit
Submission Mode
`h` or `LEFT` : Return to subreddit mode
`SPACE` : Fold the selected comment, or load additional comments
"""
COMMENT_FILE = u"""
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Replying to {author}'s {type}
{content}
"""
COMMENT_EDIT_FILE = u"""{content}
# Please enter a comment. Lines starting with '#' will be ignored,
# and an empty message aborts the comment.
#
# Editing your comment
"""
SUBMISSION_FILE = u"""{content}
# Please enter your submission. Lines starting with '#' will be ignored,
# and an empty field aborts the submission.
#
# The first line will be interpreted as the title
# The following lines will be interpreted as the content
#
# Posting to {name}
"""
|
Python
| 0 |
@@ -1196,16 +1196,68 @@
ccounts
+ `i` : Display new messages prompt
`?`
|
04287120372a6fdb906ed9f27ead4c5f91d5690e
|
Add a modified version of simple bot
|
tota/heroes/lenovo.py
|
tota/heroes/lenovo.py
|
Python
| 0 |
@@ -0,0 +1,2680 @@
from tota.utils import closest, distance, sort_by_distance, possible_moves
from tota import settings

__author__ = "angvp"


def create():

    def lenovo_hero_logic(self, things, t):
        # some useful data about the enemies I can see in the map
        enemy_team = settings.ENEMY_TEAMS[self.team]
        enemies = [thing for thing in things.values()
                   if thing.team == enemy_team]
        closest_enemy = closest(self, enemies)
        closest_enemy_distance = distance(self, closest_enemy)

        real_life = (self.life / self.max_life) * 100

        # now lets decide what to do
        if int(real_life) < 85 and self.can('heal', t):
            # if I'm hurt and can heal, heal
            if closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('fireball', t):
                return 'fireball', closest_enemy.position
            elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('stun', t):
                return 'stun', closest_enemy.position
            elif closest_enemy_distance <= settings.HEAL_DISTANCE and self.can('attack', t):
                return 'attack', closest_enemy.position
            else:
                return 'heal', self.position
        else:
            # else, try to attack
            if closest_enemy:
                # there is an enemy
                if closest_enemy_distance <= settings.STUN_DISTANCE and self.can('stun', t):
                    # try to stun him
                    return 'stun', closest_enemy.position
                if closest_enemy_distance <= settings.FIREBALL_DISTANCE and self.can('fireball', t) and closest_enemy_distance > settings.FIREBALL_RADIUS:
                    # else try to fireball him, but only if I'm not in range
                    return 'fireball', closest_enemy.position
                elif closest_enemy_distance <= settings.HERO_ATTACK_DISTANCE:
                    # else try to attack him
                    return 'attack', closest_enemy.position
                else:
                    # of finally just move to him (if I have life > 50)
                    moves = sort_by_distance(closest_enemy,
                                             possible_moves(self, things))
                    if len(moves) > 0:
                        back_moves = moves[len(moves)-1]
                    else:
                        back_moves = self.position
                    if moves and int(real_life) > 50:
                        return 'move', moves[0]
                    else:
                        return 'move', back_moves


        # can't do the things I want. Do nothing.
        return None


    return lenovo_hero_logic
|
|
2f7d5f30fd6b6cb430c55b21d7cab75800bcfe97
|
Add a little hacky highlighter
|
screencasts/hello-weave/highlight.py
|
screencasts/hello-weave/highlight.py
|
Python
| 0.000005 |
@@ -0,0 +1,1757 @@
import json

prompt = 'ilya@weave-01:~$ '

highlight = [
    ('weave-01', 'red'),
    ('weave-02', 'red'),
    ('docker', 'red'),
    ('run', 'red'),
    ('--name', 'red'),
    ('hello', 'red'),
    ('netcat', 'red'),
    ('-lk', 'red'),
    ('1234', 'red'),
    ('sudo curl -s -L git.io/weave -o /usr/local/bin/weave', 'red'),
    ('b4e40e4b4665a1ffa23f90eb3ab57c83ef243e64151bedc1501235df6e532e09\r\n', 'red'),
    ('Hello, Weave!\r\n', 'red'),
]

highlight_tokens = [t[0] for t in highlight]

tokens = []

colours = {
    'red': ('\033[91m', '\033[00m'),
}

for f in ['rec-weave-01.json', 'rec-weave-02.json']:
    with open(f) as json_data:
        d = json.load(json_data)
        json_data.close()
    commands = d['stdout']
    word = ''
    word_start = 0
    for i,x in enumerate(commands):
        curr = x[1]
        if curr == prompt: continue
        elif curr != '\r\n' and curr != ' ' and len(curr) == 1:
            if word_start == 0:
                word_start = i
                word = curr
            else:
                word += curr
        elif (curr == '\r\n' or curr == ' ') and word_start != 0:
            tokens.append((word, word_start, True))
            word_start = 0
        elif curr != '\r\n' and len(curr) > 1:
            tokens.append((curr, i, False))

    offset = 0
    for x in tokens:
        if x[0] in highlight_tokens:
            commands.insert(x[1] + offset, [0, colours['red'][0]])
            offset += 1
            l = len(x[0]) if x[2] else 1
            commands.insert(x[1] + l + offset, [0, colours['red'][1]])
            offset += 1

    d['commands'] = commands

    print(json.dumps(d))
|
|
6b4733c213046c7a16bf255cfbc92408e2f01423
|
Add test for registry model hash
|
tests/models/test_authenticated_registry_model.py
|
tests/models/test_authenticated_registry_model.py
|
Python
| 0 |
@@ -0,0 +1,1214 @@
import pytest

from dockci.models.auth import AuthenticatedRegistry


BASE_AUTHENTICATED_REGISTRY = dict(
    id=1,
    display_name='Display name',
    base_name='Base name',
    username='Username',
    password='Password',
    email='Email',
    insecure=False,
)


class TestHash(object):
    """ Test ``AuthenticatedRegistry.__hash__`` """
    def test_hash_eq(self):
        """ Test when hash should be equal """
        left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
        right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)

        assert hash(left) == hash(right)

    @pytest.mark.parametrize('attr_name,attr_value', [
        ('id', 7),
        ('display_name', 'different'),
        ('base_name', 'different'),
        ('username', 'different'),
        ('password', 'different'),
        ('email', 'different'),
        ('insecure', True),
    ])
    def test_hash_ne(self, attr_name, attr_value):
        """ Test when hash should be not equal """
        left = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)
        right = AuthenticatedRegistry(**BASE_AUTHENTICATED_REGISTRY)

        setattr(right, attr_name, attr_value)

        assert hash(left) != hash(right)
|
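For orientation, a __hash__ implementation consistent with these tests would fold exactly the attributes the parametrized cases vary into the hash. This stand-in is only a sketch; the real AuthenticatedRegistry lives in dockci.models.auth and its actual definition is not shown here:

class AuthenticatedRegistry(object):
    # hypothetical stand-in: hash over the same fields the tests mutate
    _HASH_ATTRS = ('id', 'display_name', 'base_name', 'username',
                   'password', 'email', 'insecure')

    def __init__(self, **kwargs):
        for name, value in kwargs.items():
            setattr(self, name, value)

    def __hash__(self):
        return hash(tuple(getattr(self, name, None)
                          for name in self._HASH_ATTRS))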
|
57dc7e58dcfd101c29026c8c07763cba2eb7dd14
|
add helper script to inspect comments on released content
|
scripts/show_comments.py
|
scripts/show_comments.py
|
Python
| 0 |
@@ -0,0 +1,665 @@
#!/usr/bin/env python

from __future__ import print_function
import sys

def main():
    fs = open(sys.argv[1]).read().splitlines()
    fs = map(lambda f: {'name':f, 'contents':open(f).readlines()},fs)
    for f in fs:
        buffer = ''
        multiline = 0
        is_first = True
        for i,line in enumerate(f['contents'],start=1):
            multiline += line.count('(*')
            if (line.count('//') > 0 or multiline > 0) and not is_first:
                buffer += '{}: {}'.format(i,line)
            closed = line.count('*)')
            if closed > 0 and is_first:
                is_first = False
            multiline -= closed
        if buffer:
            print ('*** {}:'.format(f['name']))
            print (buffer)
            print ()

if __name__ == '__main__':
    main()
|
|
4b83b7a3d286f60454c96ae609ce18c731339877
|
add a stub fuse-based fs component
|
src/fs/nomadfs.py
|
src/fs/nomadfs.py
|
Python
| 0 |
@@ -0,0 +1,1510 @@
#!/usr/bin/env python
#
# Copyright (c) 2015 Josef 'Jeff' Sipek <[email protected]>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#

import errno
import fuse

fuse.fuse_python_api = (0, 2)

class Nomad(fuse.Fuse):
    def __init__(self, *args, **kw):
        fuse.Fuse.__init__(self, *args, **kw)

    def getattr(self, path):
        return -errno.ENOSYS

    def getdir(self, path):
        return -errno.ENOSYS

if __name__ == "__main__":
    fs = Nomad()
    fs.flags = 0
    fs.multithreaded = 0
    fs.parse(errex=1)
    fs.main()
|
|
4efc50f91d2b141270739ea9f8bef9685cc86e7f
|
add houdini/shelf/fitcam
|
houdini/shelf/fitcam.py
|
houdini/shelf/fitcam.py
|
Python
| 0 |
@@ -0,0 +1,2486 @@
# -*- coding: utf-8 -*-
import hou
import toolutils

def setfit(oldCam, resx, resy):
    oldCam.setDisplayFlag(False)

    oldCam.parm(oldCam.path() + "/resx").set(resx)
    oldCam.parm(oldCam.path() + "/resy").set(resy)

    camups = oldCam.inputAncestors()
    if camups == ():
        camup = oldCam
    else:
        camup = camups = oldCam.inputAncestors()[-1]

    null = hou.node('obj').createNode('null', 'ScaleWorld')
    blend = hou.node('obj').createNode('blend', 'Blend_position')
    fetch = hou.node('obj').createNode('fetch', 'Fetch_NewCam')
    newCam = hou.node('obj').createNode('cam', 'Render_Camera')

    null.move(camup.position() + hou.Vector2(0, 1))
    blend.move(oldCam.position() + hou.Vector2(0, -1))
    fetch.move(oldCam.position() + hou.Vector2(0, -2))
    newCam.move(oldCam.position() + hou.Vector2(0, -3))

    camup.setNextInput(null)
    blend.setNextInput(oldCam)
    fetch.setNextInput(blend)
    newCam.setNextInput(fetch)

    null.setDisplayFlag(False)
    blend.setDisplayFlag(False)
    fetch.setDisplayFlag(False)

    blend.parm(blend.path() + "/blendm1").set(63)
    fetch.parm(fetch.path() + "/useinputoffetched").set(1)

    oldCamPath = oldCam.path()
    relativePath = newCam.relativePathTo(oldCam)
    resx = " ch(\"" + relativePath + "/resx\")"
    resy = " ch(\"" + relativePath + "/resy\")"
    focal = " ch(\"" + relativePath + "/focal\")"
    aperture = " ch(\"" + relativePath + "/aperture\")"
    vm_background = " ch(\"" + relativePath + "/vm_background\")"

    newCam.setParmExpressions(dict(resx=resx, resy=resy, focal=focal,
                                   aperture=aperture, vm_background=vm_background))

    newCam.parm("vm_bgenable").set(0)
    newCam.parm("vm_bgenable").set(0)
    newCam.parm("vm_bgenable").lock(True)

def main():
    view = toolutils.sceneViewer()
    sel = view.selectObjects('请选择一个相机')
    if len(sel) > 0:
        if sel[0].type().name()=='cam':
            resolution = hou.ui.readInput('set Resolution',buttons = ('Set','close'),title = 'set Resolution',initial_contents = '1920-1080',close_choice = 1,default_choice = 0)
            resx = resolution[1].split('-')[0]
            resy = resolution[1].split('-')[1]
            oldCam = sel[0]
            if resolution[0] == 0:
                setfit(oldCam, resx, resy)
|
|
1886af3e8c96108a8f7bdb320969373e66299bf4
|
Create __init__.py
|
python/django_standalone_orm/__init__.py
|
python/django_standalone_orm/__init__.py
|
Python
| 0.000429 |
@@ -0,0 +1 @@
+
|
|
a60ee657a6f1a0479e88b0c9c0f10b204e02ab7c
|
fix import
|
tensorflow/contrib/tensorrt/test/test_tftrt.py
|
tensorflow/contrib/tensorrt/test/test_tftrt.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Script to test TF-TensorRT integration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
# normally we should do import tensorflow as tf and then
# tf.placeholder, tf.constant, tf.nn.conv2d etc but
# it looks like internal builds don't like it so
# importing every module individually
from tensorflow.contrib.tensorrt as trt
from tensorflow.core.protobuf import config_pb2 as cpb2
from tensorflow.python.client import session as csess
from tensorflow.python.framework import constant_op as cop
from tensorflow.python.framework import dtypes as dtypes
from tensorflow.python.framework import importer as importer
from tensorflow.python.framework import ops as ops
from tensorflow.python.ops import array_ops as aops
from tensorflow.python.ops import nn as nn
from tensorflow.python.ops import nn_ops as nn_ops
def get_simple_graph_def():
"""Create a simple graph and return its graph_def"""
g = ops.Graph()
with g.as_default():
a = aops.placeholder(dtype=dtypes.float32, shape=(None, 24, 24, 2), name="input")
e = cop.constant(
[[[[1., 0.5, 4., 6., 0.5, 1.], [1., 0.5, 1., 1., 0.5, 1.]]]],
name="weights",
dtype=dtypes.float32)
conv = nn.conv2d(
input=a, filter=e, strides=[1, 2, 2, 1], padding="SAME", name="conv")
b = cop.constant([4., 1.5, 2., 3., 5., 7.], name="bias", dtype=dtypes.float32)
t = nn.bias_add(conv, b, name="biasAdd")
relu = nn.relu(t, "relu")
idty = aops.identity(relu, "ID")
v = nn_ops.max_pool(
idty, [1, 2, 2, 1], [1, 2, 2, 1], "VALID", name="max_pool")
aops.squeeze(v, name="output")
return g.as_graph_def()
def run_graph(gdef, dumm_inp):
gpu_options = cbp2.GPUOptions(per_process_gpu_memory_fraction=0.50)
ops.reset_default_graph()
g = ops.Graph()
with g.as_default():
inp, out = importer.import_graph_def(
graph_def=gdef, return_elements=["input", "output"])
inp = inp.outputs[0]
out = out.outputs[0]
with csess.Session(
config=cbp2.ConfigProto(gpu_options=gpu_options), graph=g) as sess:
val = sess.run(out, {inp: dumm_inp})
return val
if "__main__" in __name__:
inp_dims = (100, 24, 24, 2)
dummy_input = np.random.random_sample(inp_dims)
gdef = get_simple_graph_def()
# Get optimized graph
trt_graph = trt.create_inference_graph(gdef, ["output"], inp_dims[0])
o1 = run_graph(gdef, dummy_input)
o2 = run_graph(trt_graph, dummy_input)
o3 = run_graph(trt_graph, dummy_input)
assert np.array_equal(o1, o2)
assert np.array_equal(o3, o2) # sanity check
print("Pass")
|
Python
| 0.000001 |
@@ -1078,17 +1078,24 @@
.contrib
-.
+ import
tensorrt
|
88548319d8a7c44d039ce269621f0a9ff4ee8af6
|
refactor leslie matrix; add leslie_exe.py
|
poptox/leslie/leslie_exe.py
|
poptox/leslie/leslie_exe.py
|
Python
| 0.000011 |
@@ -0,0 +1,2478 @@
import numpy as np
import os.path
import pandas as pd
import sys
#find parent directory and import base (travis)
parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
sys.path.append(parentddir)
from base.uber_model import UberModel, ModelSharedInputs

# print(sys.path)
# print(os.path)


class LeslieInputs(ModelSharedInputs):
    """
    Input class for Leslie.
    """

    def __init__(self):
        """Class representing the inputs for Leslie"""
        super(LeslieInputs, self).__init__()
        self.init_pop_size = pd.Series([], dtype="float")
        self.stages = pd.Series([], dtype="float")
        self.l_m = pd.Series([], dtype="float")
        self.time_steps = pd.Series([], dtype="float")


class LeslieOutputs(object):
    """
    Output class for Leslie.
    """

    def __init__(self):
        """Class representing the outputs for Leslie"""
        super(LeslieOutputs, self).__init__()
        self.out_pop_matrix = pd.Series(name="out_pop_matrix")


class Leslie(UberModel, LeslieInputs, LeslieOutputs):
    """
    Leslie model for population growth.
    """

    def __init__(self, pd_obj, pd_obj_exp):
        """Class representing the Leslie model and containing all its methods"""
        super(Leslie, self).__init__()
        self.pd_obj = pd_obj
        self.pd_obj_exp = pd_obj_exp
        self.pd_obj_out = None

    def execute_model(self):
        """
        Callable to execute the running of the model:
            1) Populate input parameters
            2) Create output DataFrame to hold the model outputs
            3) Run the model's methods to generate outputs
            4) Fill the output DataFrame with the generated model outputs
        """
        self.populate_inputs(self.pd_obj, self)
        self.pd_obj_out = self.populate_outputs(self)
        self.run_methods()
        self.fill_output_dataframe(self)

    # Begin model methods
    def run_methods(self):
        """ Execute all algorithm methods for model logic """
        try:
            self.leslie_grow()
        except Exception as e:
            print(str(e))

    def leslie_grow(self):
        self.out_pop_matrix = np.zeros(shape=(self.stages, self.time_steps))
        self.out_pop_matrix[:, 0] = self.init_pop_size
        for i in range(1, self.time_steps):
            n = np.dot(self.l_m, self.out_pop_matrix[:, i-1])
            self.out_pop_matrix[:, i] = n.squeeze()
        return self.out_pop_matrix.tolist()
|
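For intuition, leslie_grow above just multiplies the stage-structured population vector by the Leslie matrix once per time step. A self-contained example with made-up fecundity and survival rates:

import numpy as np

# hypothetical 3-stage Leslie matrix: first row holds fecundities,
# the sub-diagonal holds survival rates between consecutive stages
L = np.array([[0.0, 1.5, 1.0],
              [0.5, 0.0, 0.0],
              [0.0, 0.7, 0.0]])

pop = np.array([100.0, 50.0, 25.0])  # initial population per stage
for step in range(5):
    pop = L.dot(pop)                 # advance one time step
print(pop)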
|
d3d6a6018d55581bf081c93386f6676c8bb105ce
|
Add module for running the main simulation
|
simulate.py
|
simulate.py
|
Python
| 0 |
@@ -0,0 +1,330 @@
import genetic
import sys

output = sys.stdout

def setOutput(out):
    output = out
    genetic.setOutput(output)

# Test data for a XOR gate
testData = (
    (0.1, 0.1, 0.9),
    (0.1, 0.9, 0.9),
    (0.9, 0.1, 0.9),
    (0.9, 0.9, 0.1)
)

def simulate():
    sim = genetic.Simulation(2, 1, testData, 100)
    sim.simulate(100)
|