commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
---|---|---|---|---|---|---|---|
7c3ed589ace907a71931b79902382b168a2ae80d
|
add direct_link_downloader
|
downloaders/direct_link_downloader.py
|
downloaders/direct_link_downloader.py
|
Python
| 0.000001 |
@@ -0,0 +1,1192 @@
+import os%0Afrom urllib.request import urlopen%0A%0A%0A%0Adef direct_link_download(url, file_path):%0A %22%22%22%0A :param url: direct link to an image%0A :param file_path: file path (including filename) to save image to%0A %22%22%22%0A # make sure the file_path param doesn't point to a directory%0A if os.path.isdir(file_path):%0A raise ValueError(':param file_path: shouldn%5C't point to a directory')%0A%0A # make sure the file doesn't already exist%0A if os.path.isfile(file_path):%0A raise FileExistsError('%25s already exists' %25 file_path)%0A%0A # create path(s) for file_path if necessary%0A base_dir = os.path.dirname(file_path)%0A if not os.path.isdir(base_dir):%0A os.makedirs(os.path.abspath(base_dir))%0A%0A # download and save the image%0A req = urlopen(url).read()%0A with open(file_path, 'w') as f:%0A f.write(req)%0A%0A%0Aif __name__ == %22__main__%22:%0A # tests%0A dir1 = os.path.join(os.getcwd(), 'img1.jpg')%0A print(dir1)%0A url = 'http://i.imgur.com/2MlAOkC.jpg'%0A url2 = 'http://img05.deviantart.net/41ee/i/2013/299/9/f/_stock__mystic_woods_by_dominikaaniola-d2ehxq4.jpg'%0A direct_link_download(url, 'img1.jpg')%0A # direct_link_download(url2, './tmp/tmp2/img2.jpg')%0A
|
|
1455f6c563edd07a61dd826bde03137fff2d3f57
|
add data for recipe 1.8
|
code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/08-calculating_with_dictionaries/main.py
|
code/ch_1-DATA_STRUCTURES_AND_ALGORITHMS/08-calculating_with_dictionaries/main.py
|
Python
| 0.000001 |
@@ -0,0 +1,1312 @@
+def example_1():%0A prices = %7B%0A 'ACME': 45.23,%0A 'AAPL': 612.78,%0A 'IBM': 205.55,%0A 'HPQ': 37.20,%0A 'FB': 10.75%0A %7D%0A%0A min_price = min(zip(prices.values(), prices.keys()))%0A max_price = max(zip(prices.values(), prices.keys()))%0A print(min_price, max_price)%0A%0A sorted_prices = sorted(zip(prices.values(), prices.keys()))%0A print(sorted_prices)%0A%0A prices_and_names = zip(prices.values(), prices.keys())%0A print(min(prices_and_names))%0A try:%0A print(max(prices_and_names))%0A except ValueError:%0A print('here is ValueError')%0A%0A%0Adef example_2():%0A prices = %7B%0A 'ACME': 45.23,%0A 'AAPL': 612.78,%0A 'IBM': 205.55,%0A 'HPQ': 37.20,%0A 'FB': 10.75%0A %7D%0A%0A print(min(prices))%0A print(max(prices))%0A print(min(prices.values()))%0A print(max(prices.values()))%0A%0A print(min(prices, key = lambda k: prices%5Bk%5D))%0A print(max(prices, key = lambda k: prices%5Bk%5D))%0A%0A print(prices%5Bmin(prices, key = lambda k: prices%5Bk%5D)%5D)%0A print(prices%5Bmax(prices, key = lambda k: prices%5Bk%5D)%5D)%0A%0A%0Adef example_3():%0A prices = %7B 'AAA': 45.23, 'ZZZ': 45.23 %7D%0A print(min(zip(prices.values(), prices.keys())))%0A print(max(zip(prices.values(), prices.keys())))%0A%0A%0Aif __name__ == '__main__':%0A example_1()%0A example_2()%0A example_3()%0A%0A%0A
|
|
4879fc188c685e4676414a2f186d8d52998bc28d
|
Create task_3_3.py
|
PINp/2015/KAKURKIN_I_V/task_3_3.py
|
PINp/2015/KAKURKIN_I_V/task_3_3.py
|
Python
| 0.999993 |
@@ -0,0 +1,388 @@
+# %D0%97%D0%B0%D0%B4%D0%B0%D1%87%D0%B0 3. %D0%92%D0%B0%D1%80%D0%B8%D0%B0%D0%BD%D1%82 3.%0A# %D0%9D%D0%B0%D0%BF%D0%B8%D1%88%D0%B8%D1%82%D0%B5 %D0%BF%D1%80%D0%BE%D0%B3%D1%80%D0%B0%D0%BC%D0%BC%D1%83, %D0%BA%D0%BE%D1%82%D0%BE%D1%80%D0%B0%D1%8F %D0%B2%D1%8B%D0%B2%D0%BE%D0%B4%D0%B8%D1%82 %D0%B8%D0%BC%D1%8F %22%D0%A7%D0%B0%D1%80%D0%BB%D1%8C%D0%B7 %D0%9B%D1%8E%D1%82%D0%B2%D0%B8%D0%B4%D0%B6 %D0%94%D0%BE%D0%B4%D0%B6%D1%81%D0%BE%D0%BD%22,%0A# %D0%B8 %D0%B7%D0%B0%D0%BF%D1%80%D0%B0%D1%88%D0%B8%D0%B2%D0%B0%D0%B5%D1%82 %D0%B5%D0%B3%D0%BE %D0%BF%D1%81%D0%B5%D0%B2%D0%B4%D0%BE%D0%BD%D0%B8%D0%BC. %0A# %D0%9F%D1%80%D0%BE%D0%B3%D1%80%D0%B0%D0%BC%D0%BC%D0%B0 %D0%B4%D0%BE%D0%BB%D0%B6%D0%BD%D0%B0 %D1%81%D1%86%D0%B5%D0%BF%D0%BB%D1%8F%D1%82%D1%8C %D0%B4%D0%B2%D0%B5 %D1%8D%D1%82%D0%B8 %D1%81%D1%82%D1%80%D0%BE%D0%BA%D0%B8 %D0%B8 %D0%B2%D1%8B%D0%B2%D0%BE%D0%B4%D0%B8%D1%82%D1%8C %D0%BF%D0%BE%D0%BB%D1%83%D1%87%D0%B5%D0%BD%D0%BD%D1%83%D1%8E %D1%81%D1%82%D1%80%D0%BE%D0%BA%D1%83,%0A# %D1%80%D0%B0%D0%B7%D0%B4%D0%B5%D0%BB%D1%8F%D1%8F %D0%B8%D0%BC%D1%8F %D0%B8 %D0%BF%D1%81%D0%B5%D0%B2%D0%B4%D0%BE%D0%BD%D0%B8%D0%BC %D1%81 %D0%BF%D0%BE%D0%BC%D0%BE%D1%89%D1%8C%D1%8E %D1%82%D0%B8%D1%80%D0%B5.%0ANAME = %22%D0%A7%D0%B0%D1%80%D0%BB%D1%8C%D0%B7 %D0%9B%D1%8E%D1%82%D0%B2%D0%B8%D0%B4%D0%B6 %D0%94%D0%BE%D0%B4%D0%B6%D1%81%D0%BE%D0%BD%22%0Aprint(NAME)%0APNAME = input(%22%D0%95%D0%B3%D0%BE %D0%BF%D1%81%D0%B5%D0%B2%D0%B4%D0%BE%D0%BD%D0%B8%D0%BC?%5Cn%22)%0Aprint(NAME + %22 - %22 + PNAME)%0Ainp = input()%0A# KAKURKIN I.V.%0A# 29.02.2016%0A
|
|
8630d60a2ecdd2fac4153623ac64ba188e05d8b7
|
Add source code.
|
pigrelay.py
|
pigrelay.py
|
Python
| 0 |
@@ -0,0 +1,1772 @@
+import os%0Aimport sys%0Aimport time%0Aimport socket%0Aimport logging%0A%0Alogging.basicConfig(level=logging.INFO)%0Alogger = logging.getLogger(__name__)%0A%0ASOCKFILE = %22/tmp/snort_alert%22%0ABUFSIZE = 65863%0A%0AIP = '127.0.0.1'%0APORT = 51234%0A%0A# TODO: TLS/SSL wrapper for socket%0A%0A%0Aclass SnortListener():%0A%0A def __init__(self):%0A self.unsock = None%0A self.nwsock = None%0A%0A def start_send(self):%0A '''Open a client on Network Socket'''%0A self.nwsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)%0A try:%0A self.nwsock.connect((IP, PORT))%0A except Exception, e:%0A logger.info(%22Network socket connection error: %25s%22 %25 e)%0A sys.exit(1)%0A%0A def start_recv(self):%0A '''Open a server on Unix Domain Socket'''%0A if os.path.exists(SOCKFILE):%0A os.unlink(SOCKFILE)%0A%0A self.unsock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)%0A self.unsock.bind(SOCKFILE)%0A logger.info(%22Unix Domain Socket listening...%22)%0A self.recv_loop()%0A%0A def recv_loop(self):%0A '''Receive Snort alert on Unix Domain Socket and%0A send to Network Socket Server forever'''%0A logger.info(%22Start the network socket client....%22)%0A self.start_send()%0A while True:%0A data = self.unsock.recv(BUFSIZE)%0A time.sleep(0.5)%0A if data:%0A logger.debug(%22Send %7B0%7D bytes of data.%22.format%0A (sys.getsizeof(data)))%0A # data == 65900 byte%0A self.tcp_send(data)%0A else:%0A pass%0A%0A def tcp_send(self, data):%0A self.nwsock.sendall(data)%0A logger.info(%22Send the alert messages to Ryu.%22)%0A%0A%0Aif __name__ == '__main__':%0A server = SnortListener()%0A server.start_recv()%0A
|
|
5eaf4675baa161e17caaff4fe8dc21b38e292f40
|
Add summary test
|
blaze/compute/tests/test_mongo_compute.py
|
blaze/compute/tests/test_mongo_compute.py
|
from __future__ import absolute_import, division, print_function
import pytest
pymongo = pytest.importorskip('pymongo')
from datetime import datetime
from toolz import pluck
from blaze import into, compute, compute_one
from blaze.compute.mongo import MongoQuery
from blaze.expr import TableSymbol, by
from blaze.compatibility import xfail
@pytest.fixture
def conn():
try:
return pymongo.MongoClient()
except pymongo.errors.ConnectionFailure:
pytest.skip('No mongo server running')
@pytest.fixture
def db(conn):
return conn.test_db
@pytest.fixture
def bank_raw():
data = [{'name': 'Alice', 'amount': 100},
{'name': 'Alice', 'amount': 200},
{'name': 'Bob', 'amount': 100},
{'name': 'Bob', 'amount': 200},
{'name': 'Bob', 'amount': 300}]
return data
@pytest.yield_fixture
def bank(db, bank_raw):
coll = db.tmp_collection
coll = into(coll, bank_raw)
yield coll
coll.drop()
@pytest.yield_fixture
def points(db):
data = [{'x': 1, 'y': 10, 'z': 100},
{'x': 2, 'y': 20, 'z': 200},
{'x': 3, 'y': 30, 'z': 300},
{'x': 4, 'y': 40, 'z': 400}]
coll = db.tmp_collection
coll = into(coll, data)
yield coll
coll.drop()
@pytest.yield_fixture
def events(db):
data = [{'time': datetime(2012, 1, 1, 12, 00, 00), 'x': 1},
{'time': datetime(2012, 1, 2, 12, 00, 00), 'x': 2},
{'time': datetime(2012, 1, 3, 12, 00, 00), 'x': 3}]
coll = db.tmp_collection
coll = into(coll, data)
yield coll
coll.drop()
@pytest.fixture
def t():
return TableSymbol('t', '{name: string, amount: int}')
@pytest.fixture
def p():
return TableSymbol('p', '{x: int, y: int, z: int}')
@pytest.fixture
def e():
return TableSymbol('e', '{time: datetime, x: int}')
@pytest.fixture
def q():
return MongoQuery('fake', [])
def test_tablesymbol_one(t, bank):
assert compute_one(t, bank) == MongoQuery(bank, ())
def test_tablesymbol(t, bank, bank_raw):
assert compute(t, bank) == list(pluck(['name', 'amount'], bank_raw))
def test_projection_one(t, q):
assert compute_one(t[['name']], q).query == ({'$project': {'name': 1}},)
def test_head_one(t, q):
assert compute_one(t.head(5), q).query == ({'$limit': 5},)
def test_head(t, bank):
assert len(compute(t.head(2), bank)) == 2
def test_projection(t, bank):
assert set(compute(t.name, bank)) == set(['Alice', 'Bob'])
assert set(compute(t[['name']], bank)) == set([('Alice',), ('Bob',)])
def test_selection(t, bank):
assert set(compute(t[t.name=='Alice'], bank)) == set([('Alice', 100),
('Alice', 200)])
assert set(compute(t['Alice'==t.name], bank)) == set([('Alice', 100),
('Alice', 200)])
assert set(compute(t[t.amount > 200], bank)) == set([('Bob', 300)])
assert set(compute(t[t.amount >= 200], bank)) == set([('Bob', 300),
('Bob', 200),
('Alice', 200)])
assert set(compute(t[t.name!='Alice'].name, bank)) == set(['Bob'])
assert set(compute(t[(t.name=='Alice') & (t.amount > 150)], bank)) == \
set([('Alice', 200)])
assert set(compute(t[(t.name=='Alice') | (t.amount > 250)], bank)) == \
set([('Alice', 200),
('Alice', 100),
('Bob', 300)])
@xfail
def test_columnwise(p, points):
assert set(compute(p.x + p.y, points)) == set([11, 22, 33])
def test_by_one(t, q):
assert compute_one(by(t.name, t.amount.sum()), q).query == \
({'$group': {'_id': {'name': '$name'},
'amount_sum': {'$sum': '$amount'}}},
{'$project': {'amount_sum': '$amount_sum', 'name': '$_id.name'}})
def test_by(t, bank):
assert set(compute(by(t.name, t.amount.sum()), bank)) == \
set([('Alice', 300), ('Bob', 600)])
assert set(compute(by(t.name, t.amount.min()), bank)) == \
set([('Alice', 100), ('Bob', 100)])
assert set(compute(by(t.name, t.amount.max()), bank)) == \
set([('Alice', 200), ('Bob', 300)])
assert set(compute(by(t.name, t.name.count()), bank)) == \
set([('Alice', 2), ('Bob', 3)])
def test_reductions(t, bank):
assert compute(t.amount.min(), bank) == 100
assert compute(t.amount.max(), bank) == 300
assert compute(t.amount.sum(), bank) == 900
def test_distinct(t, bank):
assert set(compute(t.name.distinct(), bank)) == set(['Alice', 'Bob'])
def test_sort(t, bank):
assert compute(t.amount.sort('amount'), bank) == \
[100, 100, 200, 200, 300]
assert compute(t.amount.sort('amount', ascending=False), bank) == \
[300, 200, 200, 100, 100]
def test_by_multi_column(t, bank, bank_raw):
assert set(compute(by(t[['name', 'amount']], t.count()), bank)) == \
set([(d['name'], d['amount'], 1) for d in bank_raw])
def test_datetime_handling(e, events):
assert set(compute(e[e.time >= datetime(2012, 1, 2, 12, 0, 0)].x,
events)) == set([2, 3])
assert set(compute(e[e.time >= "2012-01-02"].x,
events)) == set([2, 3])
|
Python
| 0.000003 |
@@ -5318,28 +5318,228 @@
events)) == set(%5B2, 3%5D)%0A
+%0A%0Adef test_summary_kwargs(t, bank):%0A expr = by(t.name, total=t.amount.sum(), avg=t.amount.mean())%0A result = compute(expr, bank)%0A assert result == %5B('Bob', 200.0, 600), ('Alice', 150.0, 300)%5D%0A
|
e15de99ae79e97becdc3d4a4a2bcf60e70e5d2d4
|
Create escolas_ceara.senso_2013.py
|
raspagem/random/escolas_ceara.senso_2013.py
|
raspagem/random/escolas_ceara.senso_2013.py
|
Python
| 0.000093 |
@@ -0,0 +1,751 @@
+import urllib.request%0Aimport json%0A%0A#url = 'http://educacao.dadosabertosbr.com/api/cidades/ce'%0A#cidades = urllib.request.urlopen(url).read()%0A#cidades = json.loads(cidades.decode('utf-8'))%0A%0A#print('Lista de Cidades')%0A#%5Bprint(cidade.split(':')%5B1%5D) for cidade in cidades%5D%0A%0Aprint('Lista de Escolas Municipais de Aracati com Lab de Inform%C3%A1tica')%0Aurl = 'http://educacao.dadosabertosbr.com/api/escolas/buscaavancada?cidade=2301109&laboratorioInformatica=on&situacaoFuncionamento=1&dependenciaAdministrativa=3'%0A%0Aescolas = urllib.request.urlopen(url).read() # em bytes%0A%0A# obt%C3%A9m uma lista de escolas%0Aescolas = json.loads(escolas.decode('utf-8')) %0A%0Aqtde, escolas = escolas%0A%0Afor escola in escolas:%0A%09print(escola%5B'nome'%5D)%0A%0Aprint('total:%7B0%7D%5Cn'.format(len(escolas)))%0A
|
|
ef38b112a2cf46fe1bbb52a9633fd42fad03ddb0
|
Update method name
|
rtv/subscriptions.py
|
rtv/subscriptions.py
|
import curses
import sys
import time
import logging
from .content import SubscriptionContent
from .page import BasePage, Navigator, BaseController
from .curses_helpers import (Color, LoadScreen, add_line)
__all__ = ['SubscriptionController', 'SubscriptionPage']
_logger = logging.getLogger(__name__)
class SubscriptionController(BaseController):
character_map = {}
class SubscriptionPage(BasePage):
def __init__(self, stdscr, reddit):
self.controller = SubscriptionController(self)
self.loader = LoadScreen(stdscr)
self.selected_subreddit_data = None
content = SubscriptionContent.from_user(reddit, self.loader)
super(SubscriptionPage, self).__init__(stdscr, reddit, content)
def loop(self):
"Main control loop"
self.active = True
while self.active:
self.draw()
cmd = self.stdscr.getch()
self.controller.trigger(cmd)
@SubscriptionController.register(curses.KEY_F5, 'r')
def refresh_content(self):
"Re-download all subscriptions and reset the page index"
self.content = SubscriptionContent.get_list(self.reddit, self.loader)
self.nav = Navigator(self.content.get)
@SubscriptionController.register(curses.KEY_ENTER, 10, curses.KEY_RIGHT)
def store_selected_subreddit(self):
"Store the selected subreddit and return to the subreddit page"
self.selected_subreddit_data = self.content.get(self.nav.absolute_index)
self.active = False
@SubscriptionController.register(curses.KEY_LEFT, 'h', 's')
def close_subscriptions(self):
"Close subscriptions and return to the subreddit page"
self.active = False
@staticmethod
def draw_item(win, data, inverted=False):
n_rows, n_cols = win.getmaxyx()
n_cols -= 1 # Leave space for the cursor in the first column
# Handle the case where the window is not large enough to fit the data.
valid_rows = range(0, n_rows)
offset = 0 if not inverted else -(data['n_rows'] - n_rows)
row = offset
if row in valid_rows:
attr = curses.A_BOLD | Color.YELLOW
add_line(win, u'{name}'.format(**data), row, 1, attr)
row = offset + 1
for row, text in enumerate(data['split_title'], start=row):
if row in valid_rows:
add_line(win, text, row, 1)
|
Python
| 0.000002 |
@@ -1133,16 +1133,17 @@
ent.
-get_list
+from_user
(sel
@@ -2402,8 +2402,9 @@
row, 1)
+%0A
|
43e48de2210873fd5ab6c9181f0aab7884c529be
|
Create app.py
|
reddit2telegram/channels/r_communism/app.py
|
reddit2telegram/channels/r_communism/app.py
|
Python
| 0.000003 |
@@ -0,0 +1,141 @@
+#encoding:utf-8%0A%0Asubreddit = 'communism'%0At_channel = '@r_communism'%0A%0A%0Adef send_post(submission, r2t):%0A return r2t.send_simple(submission)%0A
|
|
d17c14df00c31af49080ff2f9fea8597a8861461
|
Add recipe usage command for quick diagnostics.
|
starthinker_ui/recipe/management/commands/recipe_usage.py
|
starthinker_ui/recipe/management/commands/recipe_usage.py
|
Python
| 0.0001 |
@@ -0,0 +1,1405 @@
+###########################################################################%0A#%0A# Copyright 2022 Google LLC%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22);%0A# you may not use this file except in compliance with the License.%0A# You may obtain a copy of the License at%0A#%0A# https://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS,%0A# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.%0A# See the License for the specific language governing permissions and%0A# limitations under the License.%0A#%0A###########################################################################%0A%0Afrom django.core.management.base import BaseCommand, CommandError%0Afrom django.conf import settings%0A%0Afrom starthinker_ui.account.models import Account%0A%0Aclass Command(BaseCommand):%0A help = 'Prints recipe count and age by account.'%0A%0A def handle(self, *args, **kwargs):%0A usage = %5B%5D%0A%0A for account in Account.objects.all():%0A usage.append(%7B%0A 'email':account.email,%0A 'recipes':list(account.recipe_set.all().values_list('birthday', flat=True))%0A %7D)%0A%0A usage.sort(key=lambda u: len(u%5B'recipes'%5D), reverse=True)%0A%0A for u in usage:%0A print ('%7B%7D, %7B%7D, %7B%7D'.format(u%5B'email'%5D, len(u%5B'recipes'%5D), max(u%5B'recipes'%5D) if u%5B'recipes'%5D else ''))%0A
|
|
2b797879f3a4bf148575df1309369ed4532e4ab6
|
bump version to 14.1.0
|
zmq/sugar/version.py
|
zmq/sugar/version.py
|
"""PyZMQ and 0MQ version functions."""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 Brian Granger, Min Ragan-Kelley
#
# This file is part of pyzmq
#
# Distributed under the terms of the New BSD License. The full license is in
# the file COPYING.BSD, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from zmq.backend import zmq_version_info
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
VERSION_MAJOR = 14
VERSION_MINOR = 1
VERSION_PATCH = 0
VERSION_EXTRA = 'dev'
__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
if VERSION_EXTRA:
__version__ = "%s-%s" % (__version__, VERSION_EXTRA)
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH, float('inf'))
else:
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
__revision__ = ''
def pyzmq_version():
"""return the version of pyzmq as a string"""
if __revision__:
return '@'.join([__version__,__revision__[:6]])
else:
return __version__
def pyzmq_version_info():
"""return the pyzmq version as a tuple of at least three numbers
If pyzmq is a development version, `inf` will be appended after the third integer.
"""
return version_info
def zmq_version():
"""return the version of libzmq as a string"""
return "%i.%i.%i" % zmq_version_info()
__all__ = ['zmq_version', 'zmq_version_info',
'pyzmq_version','pyzmq_version_info',
'__version__', '__revision__'
]
|
Python
| 0 |
@@ -871,19 +871,16 @@
XTRA = '
-dev
'%0A__vers
|
5b2c328b94244fd6baf6403349919f3bc4f2d013
|
add missing migration
|
meinberlin/apps/cms/migrations/0032_update_body.py
|
meinberlin/apps/cms/migrations/0032_update_body.py
|
Python
| 0.000258 |
@@ -0,0 +1,2509 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.18 on 2019-01-23 16:07%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations%0Aimport meinberlin.apps.cms.blocks%0Aimport wagtail.core.blocks%0Aimport wagtail.core.fields%0Aimport wagtail.images.blocks%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('meinberlin_cms', '0031_mapteaser_block'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='homepage',%0A name='body',%0A field=wagtail.core.fields.StreamField(%5B('paragraph', wagtail.core.blocks.RichTextBlock(template='meinberlin_cms/blocks/richtext_block.html')), ('call_to_action', wagtail.core.blocks.StructBlock(%5B('body', wagtail.core.blocks.RichTextBlock()), ('link', wagtail.core.blocks.CharBlock()), ('link_text', wagtail.core.blocks.CharBlock(label='Link Text', max_length=50))%5D)), ('image_call_to_action', wagtail.core.blocks.StructBlock(%5B('image', wagtail.images.blocks.ImageChooserBlock()), ('title', wagtail.core.blocks.CharBlock(max_length=80)), ('body', wagtail.core.blocks.RichTextBlock()), ('link', wagtail.core.blocks.CharBlock()), ('link_text', wagtail.core.blocks.CharBlock(label='Link Text', max_length=50))%5D)), ('columns_text', wagtail.core.blocks.StructBlock(%5B('columns_count', wagtail.core.blocks.ChoiceBlock(choices=%5B(2, 'Two columns'), (3, 'Three columns'), (4, 'Four columns')%5D)), ('columns', wagtail.core.blocks.ListBlock(wagtail.core.blocks.RichTextBlock(label='Column body')))%5D)), ('projects', wagtail.core.blocks.StructBlock(%5B('title', wagtail.core.blocks.CharBlock(max_length=80)), ('projects', wagtail.core.blocks.ListBlock(meinberlin.apps.cms.blocks.ProjectSelectionBlock(label='Project')))%5D)), ('activities', wagtail.core.blocks.StructBlock(%5B('heading', wagtail.core.blocks.CharBlock(label='Heading')), ('count', wagtail.core.blocks.IntegerBlock(default=5, label='Count'))%5D)), ('accordion', wagtail.core.blocks.StructBlock(%5B('title', 
wagtail.core.blocks.CharBlock()), ('body', wagtail.core.blocks.RichTextBlock(required=False))%5D)), ('infographic', wagtail.core.blocks.StructBlock(%5B('text_left', wagtail.core.blocks.CharBlock(max_length=50)), ('text_center', wagtail.core.blocks.CharBlock(max_length=50)), ('text_right', wagtail.core.blocks.CharBlock(max_length=50))%5D)), ('map_teaser', wagtail.core.blocks.StructBlock(%5B('image', wagtail.images.blocks.ImageChooserBlock()), ('icon', wagtail.core.blocks.RichTextBlock()), ('body', wagtail.core.blocks.RichTextBlock())%5D))%5D),%0A ),%0A %5D%0A
|
|
e53a951ed98f460b603f43f6364d5d0a0f17a1ba
|
Add basic class structure, map functionality, and a set of consumer functions.
|
src/streamer.py
|
src/streamer.py
|
Python
| 0 |
@@ -0,0 +1,618 @@
+class pStream:%0A###PRIVATE FUNCTIONS%0A%0A def _builder(self, expression):%0A self.STR = expression%0A return self%0A%0A%0A###OVERRIDES%0A def next(self):%0A return next(self.STR)%0A%0A def __init__(self, iterable_thing):%0A self.STR = iterable_thing%0A%0A def __iter__(self):%0A return iter(self.STR)%0A%0A### TRANSFORMS%0A def map(self,function):%0A return self._builder(map(function, self.STR))%0A%0A### CONSUMERS%0A%0A def print_stream(self):%0A print(list(self.STR))%0A%0A def consume(self, function):%0A function(self.STR)%0A%0A def drain(self):%0A for x in self.STR:%0A pass%0A%0A
|
|
82e871441010999e4a369b101019a34e7b03eca4
|
add common csp-report URL
|
security/urls.py
|
security/urls.py
|
Python
| 0 |
@@ -0,0 +1,162 @@
+from django.conf.urls import patterns, include, url%0A%0Aurlpatterns = patterns('security.views',%0A url('%5E/csp-report/$', security.views.csp_report),%0A )%0A
|
|
86c3f149726b58951e85f9bd6e324b032430b5ae
|
Deploy a schema template (#51379)
|
lib/ansible/modules/network/aci/mso_schema_template_deploy.py
|
lib/ansible/modules/network/aci/mso_schema_template_deploy.py
|
Python
| 0 |
@@ -0,0 +1,2798 @@
+#!/usr/bin/python%0A# -*- coding: utf-8 -*-%0A%0A# Copyright: (c) 2018, Dag Wieers (@dagwieers) %[email protected]%3E%0A# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)%0A%0Afrom __future__ import absolute_import, division, print_function%0A__metaclass__ = type%0A%0AANSIBLE_METADATA = %7B'metadata_version': '1.1',%0A 'status': %5B'preview'%5D,%0A 'supported_by': 'community'%7D%0A%0ADOCUMENTATION = r'''%0A---%0Amodule: mso_schema_template_deploy%0Ashort_description: Deploy schema templates to sites%0Adescription:%0A- Deploy schema templates to sites.%0Aauthor:%0A- Dag Wieers (@dagwieers)%0Aversion_added: '2.8'%0Aoptions:%0A schema:%0A description:%0A - The name of the schema.%0A type: str%0A required: yes%0A template:%0A description:%0A - The name of the template.%0A type: str%0A aliases: %5B name %5D%0A state:%0A description:%0A - Use C(deploy) to deploy schema template.%0A - Use C(status) to get deployment status.%0A type: str%0A choices: %5B deploy, status %5D%0A default: deploy%0Aseealso:%0A- module: mso_schema_site%0A- module: mso_schema_template%0Aextends_documentation_fragment: mso%0A'''%0A%0AEXAMPLES = r'''%0A- name: Deploy a schema template%0A mso_schema_template:%0A host: mso_host%0A username: admin%0A password: SomeSecretPassword%0A schema: Schema 1%0A template: Template 1%0A state: deploy%0A delegate_to: localhost%0A%0A- name: Get deployment status%0A mso_schema:%0A host: mso_host%0A username: admin%0A password: SomeSecretPassword%0A schema: Schema 1%0A template: Template 1%0A state: status%0A delegate_to: localhost%0A register: status_result%0A'''%0A%0ARETURN = r'''%0A'''%0A%0Afrom ansible.module_utils.basic import AnsibleModule%0Afrom ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec%0A%0A%0Adef main():%0A argument_spec = mso_argument_spec()%0A argument_spec.update(%0A schema=dict(type='str', required=True),%0A template=dict(type='str', required=True, aliases=%5B'name'%5D),%0A 
state=dict(type='str', default='deploy', choices=%5B'deploy', 'status'%5D),%0A )%0A%0A module = AnsibleModule(%0A argument_spec=argument_spec,%0A supports_check_mode=True,%0A )%0A%0A schema = module.params%5B'schema'%5D%0A template = module.params%5B'template'%5D%0A state = module.params%5B'state'%5D%0A%0A mso = MSOModule(module)%0A%0A # Get schema%0A schema_id = mso.lookup_schema(schema)%0A%0A payload = dict(%0A schemaId=schema_id,%0A templateName=template,%0A )%0A%0A if state == 'deploy':%0A path = 'execute/schema/%7B0%7D/template/%7B1%7D'.format(schema_id, template)%0A elif state == 'status':%0A path = 'status/schema/%7B0%7D/template/%7B1%7D'.format(schema_id, template)%0A%0A if not module.check_mode:%0A status = mso.request(path, method='GET', data=payload)%0A%0A mso.exit_json(**status)%0A%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
0b6709670179c0721b4f113d13bf34d9ac7715dd
|
Add a python plotter that compares the results of with Stirling numbers
|
test/indices.py
|
test/indices.py
|
Python
| 0.000002 |
@@ -0,0 +1,767 @@
+import matplotlib.pyplot as plt%0Aimport numpy%0A%0Afrom math import factorial%0A%0Adef binom(a,b):%0A return factorial(a) / (factorial(b)*factorial(a-b))%0A%0Adef stirling(n,k):%0A if n%3C=0 or n!=0 and n==k:%0A return 1%0A elif k%3C=0 or n%3Ck:%0A return 0%0A elif n==0 and k==0:%0A return -1%0A else:%0A s = sum((-1)**(k-j)*binom(k,j)*j**n for j in range(k+1))%0A return s / factorial(k)%0A%0Alog = %5B%5D%0Awith open(%22indices.log%22) as indices:%0A next(indices)%0A for line in indices:%0A indices = line.split()%5B1:7%5D%0A size = len(set(indices))%0A log.append(size)%0A%0Astirlings = numpy.array(%5Bstirling(6, k) for k in range(1,7)%5D)%0Aplt.hist(log, %5B0.5,1.5,2.5,3.5,4.5,5.5,6.5,7.5%5D)%0Aplt.plot(range(1,7), stirlings * len(log)/stirlings.sum())%0Aplt.show()%0A
|
|
7b179e4a420a3cd7a27f0f438a6eac462048bb93
|
Add py solution for 554. Brick Wall
|
py/brick-wall.py
|
py/brick-wall.py
|
Python
| 0.004967 |
@@ -0,0 +1,977 @@
+import heapq%0Aclass Solution(object):%0A def leastBricks(self, wall):%0A %22%22%22%0A :type wall: List%5BList%5Bint%5D%5D%0A :rtype: int%0A %22%22%22%0A n_row = len(wall)%0A heap = %5B(wall%5Bi%5D%5B0%5D, i, 0) for i in xrange(n_row)%5D%0A heapq.heapify(heap)%0A max_noncross = 0%0A while True:%0A l, idx, offset = heapq.heappop(heap)%0A cur_l = l%0A if offset == len(wall%5Bidx%5D) - 1:%0A break%0A heapq.heappush(heap, (l + wall%5Bidx%5D%5Boffset + 1%5D, idx, offset + 1))%0A cnt = 1%0A while True:%0A ol, oidx, ooffset = heapq.heappop(heap)%0A if ol == l:%0A cnt += 1%0A heapq.heappush(heap, (ol + wall%5Boidx%5D%5Booffset + 1%5D, oidx, ooffset + 1))%0A elif ol %3E l:%0A heapq.heappush(heap, (ol, oidx, ooffset))%0A break%0A max_noncross = max(max_noncross, cnt)%0A return n_row - max_noncross%0A
|
|
d6f029de32212d0865cb314a011cb4f607e6fa22
|
Add function for avoiding pickeling functions.
|
distarray/dist/mpionly_utils.py
|
distarray/dist/mpionly_utils.py
|
"""
Utilities for running Distarray in MPI mode.
"""
from __future__ import absolute_import
from importlib import import_module
from mpi4py import MPI as mpi
import distarray
from distarray.utils import uid
world = mpi.COMM_WORLD
world_rank = world.rank
client_rank = 0
def get_rank():
return world.rank
def get_nengines():
"""Get the number of engines which must be COMM_WORLD.size - 1 (for the
client)
"""
return world.size - 1
def _set_on_main(name, obj):
"""Add obj as an attribute to the __main__ module with alias `name` like:
__main__.name = obj
"""
main = import_module('__main__')
setattr(main, name, obj)
def make_intercomm(targets=None):
# create a comm that is split into client and engines.
targets = targets or list(range(world.size - 1))
if world_rank == client_rank:
split_world = world.Split(0, 0)
else:
split_world = world.Split(1, world_rank)
# create the intercomm
if world_rank == client_rank:
intercomm = split_world.Create_intercomm(0, world, 1)
else:
intercomm = split_world.Create_intercomm(0, world, 0)
return intercomm
def make_base_comm():
"""
Creates an intracomm consisting of all the engines. Then sets:
`__main__._base_comm = comm_name`
"""
if world_rank == 0:
comm_name = uid()
else:
comm_name = ''
comm_name = world.bcast(comm_name)
engines = world.group.Excl([client_rank])
engine_comm = world.Create(engines)
_set_on_main(comm_name, engine_comm)
return comm_name
def make_targets_comm(targets):
""" This is incorrect we need a mapping from the targets to world"""
targets = targets or list(range(world.size - 1))
# get a universal name for the out comm
if world_rank == 0:
comm_name = uid()
else:
comm_name = ''
comm_name = world.bcast(comm_name)
# create a mapping from the targets to world ranks
all_ranks = range(1, world.size)
all_targets = range(world.size - 1)
target_to_rank_map = {t: r for t, r in zip(all_targets, all_ranks)}
# map the targets to the world ranks
mapped_targets = [target_to_rank_map[t] for t in targets]
# create the targets comm
targets_group = world.group.Incl(mapped_targets)
targets_comm = world.Create(targets_group)
_set_on_main(comm_name, targets_comm)
return comm_name
def setup_engine_comm(targets=None):
# create a comm that is split into client and engines.
targets = range(world.size - 1) if targets is None else targets
name = uid()
if world_rank == client_rank:
split_world = world.Split(0, 0)
elif (world_rank + 1) in targets:
split_world = world.Split(1, world_rank)
_set_on_main(name, split_world)
else:
world.Split(2, world_rank)
def initial_comm_setup(targets=None):
"""Setup client and engine intracomm, and intercomm."""
# choose a name for _base_comm
if world_rank == 0:
comm_name = uid()
else:
comm_name = ''
comm_name = world.bcast(comm_name)
# create a comm that is split into client and engines.
if world_rank == client_rank:
split_world = world.Split(0, 0)
else:
split_world = world.Split(1, world_rank)
# attach the comm to __main__, name it comm_name
_set_on_main(comm_name, split_world)
# make the comm_name available at distarray._BASE_COMM
setattr(distarray, '_BASE_COMM', comm_name)
# create the intercomm
if world_rank == client_rank:
intercomm = split_world.Create_intercomm(0, world, 1)
else:
intercomm = split_world.Create_intercomm(0, world, 0)
# make interomm available at distarray.INTERCOMM
setattr(distarray, 'INTERCOMM', intercomm)
def is_solo_mpi_process():
if world.size == 1:
return True
else:
return False
|
Python
| 0 |
@@ -269,16 +269,746 @@
k = 0%0A%0A%0A
+def push_function(context, key, func):%0A func_code = func.__code__%0A func_globals = func.__globals__ # noqa%0A func_name = func.__name__%0A func_defaults = func.__defaults__%0A func_closure = func.__closure__%0A%0A func_data = (func_code, func_name, func_defaults, func_closure)%0A%0A def reassemble_and_store_func(key, func_data):%0A import types%0A from importlib import import_module%0A main = import_module('__main__')%0A func = types.FunctionType(func_data%5B0%5D, main.__dict__, func_data%5B1%5D,%0A func_data%5B2%5D, func_data%5B3%5D)%0A setattr(main, key, func)%0A%0A context.apply(reassemble_and_store_func, args=(key, func_data),%0A targets=context.targets)%0A%0A%0A
def get_
|
846ad2780ad2ccc9afbd6a224d567389bf09611f
|
add integer-to-english-words
|
vol6/integer-to-english-words/integer-to-english-words.py
|
vol6/integer-to-english-words/integer-to-english-words.py
|
Python
| 0.999999 |
@@ -0,0 +1,1392 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A# @Author: Zeyuan Shang%0A# @Date: 2015-11-06 21:35:56%0A# @Last Modified by: Zeyuan Shang%0A# @Last Modified time: 2015-11-06 21:36:07%0Aclass Solution(object):%0A def numberToWords(self, num):%0A %22%22%22%0A :type num: int%0A :rtype: str%0A %22%22%22%0A if num == 0:%0A return 'Zero'%0A less20 = %5B'One', 'Two', 'Three', 'Four', 'Five', 'Six', 'Seven', 'Eight', 'Nine', 'Ten', %0A 'Eleven', 'Twelve', 'Thirteen', 'Fourteen', 'Fifteen', 'Sixteen', 'Seventeen', 'Eighteen', 'Nineteen'%5D%0A tens = %5B'Twenty', 'Thirty', 'Forty', 'Fifty', 'Sixty', 'Seventy', 'Eighty', 'Ninety'%5D%0A %0A def words(num):%0A if num == 0:%0A return %5B%5D%0A elif num %3C 20:%0A return %5Bless20%5Bnum - 1%5D%5D%0A elif num %3C 100:%0A return %5Btens%5Bnum / 10 - 2%5D%5D + words(num %25 10)%0A elif num %3C 1000:%0A return %5Bless20%5Bnum / 100 - 1%5D, 'Hundred'%5D + words(num %25 100)%0A elif num %3C 10 ** 6:%0A return words(num / (10 ** 3)) + %5B'Thousand'%5D + words(num %25 (10 ** 3))%0A elif num %3C 10 ** 9:%0A return words(num / (10 ** 6)) + %5B'Million'%5D + words(num %25 (10 ** 6))%0A else:%0A return words(num / (10 ** 9)) + %5B'Billion'%5D + words(num %25 (10 ** 9))%0A %0A return ' '.join(words(num))
|
|
f02f8f5a68bd26d1ece32c50482729b7774b6e2a
|
Add a simple script for looking at connections.
|
scripts/simple-analysis.py
|
scripts/simple-analysis.py
|
Python
| 0 |
@@ -0,0 +1,770 @@
+#!/usr/bin/python%0A%0Afrom __future__ import print_function, division%0A%0Aimport networkx%0A%0Afrom reddit_meatspace.models import MeetupConnections%0A%0A%0Aconnections = MeetupConnections._byID(%222013%22)%0Adigraph = networkx.DiGraph()%0Afor connection, timestamp in connections._values().iteritems():%0A left, right = connection.split(%22:%22)%0A digraph.add_edge(left, right)%0A%0Alenient = digraph.to_undirected(reciprocal=False)%0Astrict = digraph.to_undirected(reciprocal=True)%0Ameetups = networkx.connected_component_subgraphs(lenient)%0A%0Aprint(%22%7B0%7D people @ %7B1%7D meetups (avg. %7B2:.2%7D per meetup)%22.format(%0A len(lenient), len(meetups), len(lenient) / len(meetups)))%0Aprint(%22%7B0%7D connections of %7B1%7D distinct meetings (%7B2:.2%25%7D)%22.format(strict.size(), lenient.size(), strict.size() / lenient.size()))%0A
|
|
e3a36aaab3abe7c645e3b8491cd163dea8ff0fea
|
add python solution to "project euler - problem 4"
|
problem4.py
|
problem4.py
|
Python
| 0 |
@@ -0,0 +1,494 @@
+%0Adef is_palindrome(number):%0A%09number = str(number)%0A%09number_digits = len(number)%0A%09for i in xrange(0, number_digits/2):%0A%09%09if number%5Bi%5D != number%5Bnumber_digits-i-1%5D:%0A%09%09%09return False%0A%09return True%0A%0Adef test_it(n):%0A%09print n, is_palindrome(n)%0A%0Afor x in xrange(0,1231):%09%0A%09test_it(x);%0A%0Agreatest_palindrome = 0%0Afor x in xrange(100,999):%0A%09for y in xrange(100,999):%0A%09%09product = x * y%0A%09%09if is_palindrome(product) and product %3E greatest_palindrome:%0A%09%09%09greatest_palindrome = product%0A%0Aprint greatest_palindrome%0A
|
|
bf4b3d79f34e189a30b8168796fa1595bf49f1d7
|
Fix field name
|
core/migrations/0002_auto_20150907_1413.py
|
core/migrations/0002_auto_20150907_1413.py
|
Python
| 0.000003 |
@@ -0,0 +1,378 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('core', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.RenameField(%0A model_name='notification',%0A old_name='name',%0A new_name='channel',%0A ),%0A %5D%0A
|
|
404051ebc9d68c571be77e177b0455631f0c14ad
|
create basic model `Food` for run all tests
|
tests/models.py
|
tests/models.py
|
Python
| 0.000003 |
@@ -0,0 +1,344 @@
+from __future__ import unicode_literals%0A%0Afrom django.db import models%0Afrom django.utils.encoding import python_2_unicode_compatible%0A%0Afrom tags.fields import TagField%0A%0A%0A@python_2_unicode_compatible%0Aclass Food(models.Model):%0A name = models.CharField(max_length=50)%0A%0A tags = TagField('Tags')%0A%0A def __str__(self):%0A return self.name%0A
|
|
50ca8ffb078b37b472c1ccbbb2a4f21e8d7eade4
|
fix gbformatting
|
django-openstack/django_openstack/templatetags/templatetags/sizeformat.py
|
django-openstack/django_openstack/templatetags/templatetags/sizeformat.py
|
"""
Template tags for displaying sizes
"""
import datetime
from django import template
from django.utils import translation
from django.utils import formats
register = template.Library()
def int_format(value):
return int(value)
def float_format(value):
return formats.number_format(round(value, 1), 0)
def filesizeformat(bytes, filesize_number_format):
try:
bytes = float(bytes)
except (TypeError,ValueError,UnicodeDecodeError):
return translation.ungettext("%(size)d byte", "%(size)d bytes", 0) % {'size': 0}
if bytes < 1024:
return translation.ungettext("%(size)d", "%(size)d", bytes) % {'size': bytes}
if bytes < 1024 * 1024:
return translation.ugettext("%s KB") % filesize_number_format(bytes / 1024)
if bytes < 1024 * 1024 * 1024:
return translation.ugettext("%s MB") % filesize_number_format(bytes / (1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024:
return translation.ugettext("%s GB") % filesize_number_format(bytes / (1024 * 1024 * 1024))
if bytes < 1024 * 1024 * 1024 * 1024 * 1024:
return translation.ugettext("%s TB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024))
return translation.ugettext("%s PB") % filesize_number_format(bytes / (1024 * 1024 * 1024 * 1024 * 1024))
@register.filter(name='mbformat')
def mbformat(mb):
return filesizeformat(mb * 1024 * 1024, int_format).replace(' ', '')
@register.filter(name='diskgbformat')
def diskgbformat(gb):
return filesizeformat(gb * 1000 * 1000, float_format).replace(' ', '')
|
Python
| 0.000002 |
@@ -1525,17 +1525,24 @@
* 10
-00
+24 * 1024
* 10
-00
+24
, fl
|
106bce6081ed6c1c8442a1bb82aa39ef177bed8a
|
Solve problem 6
|
problem006.py
|
problem006.py
|
Python
| 0.999998 |
@@ -0,0 +1,194 @@
+#!/usr/bin/env python3%0A%0Adef sumsq(maxnumber):%0A return sum(n**2 for n in range(1, maxnumber+1))%0A%0Adef sqsum(maxnumber):%0A return sum(range(1, maxnumber+1))**2%0A%0Aprint(sqsum(100) - sumsq(100))%0A
|
|
fab193fa2c31b2e1cf58255d37f5e8dd63fef206
|
Add high level for key signing
|
libnacl/sign.py
|
libnacl/sign.py
|
Python
| 0.000001 |
@@ -0,0 +1,1210 @@
+'''%0AHigh level routines to maintain signing keys and to sign and verify messages%0A'''%0A# Import libancl libs%0Aimport libnacl%0Aimport libnacl.utils%0Aimport libnacl.encode%0A%0A%0Aclass Signer(libnacl.utils.BaseKey):%0A '''%0A The tools needed to sign messages%0A '''%0A def __init__(self, seed=None):%0A '''%0A Create a signing key, if not seed it supplied a keypair is generated%0A '''%0A if seed:%0A if len(seed) != libnacl.crypto_sign_SEEDBYTES:%0A raise ValueError('Invalid seed bytes')%0A self.sk, self.vk = libnacl.crypto_sign_seed_keypair(seed)%0A else:%0A self.sk, self.vk = libnacl.crypto_sign_keypair()%0A%0A def sign(self, msg):%0A '''%0A Sign the given message with this key%0A '''%0A return libnacl.crypto_sign(msg, self.sk)%0A%0A%0Aclass Verifier(libnacl.utils.BaseKey):%0A '''%0A Verify signed messages%0A '''%0A def __init__(self, vk_hex):%0A '''%0A Create a verification key from a hex encoded vkey%0A '''%0A self.vk = libnacl.encode.hex_decode(vk_hex)%0A%0A def verify(self, msg):%0A '''%0A Verify the message with tis key%0A '''%0A return libnacl.crypto_sign_open(msg, self.vk)%0A
|
|
6332ca6fd715e730faf7e377843e2d2f8bfa2b84
|
Create blink.py
|
linkit/blink.py
|
linkit/blink.py
|
Python
| 0.000024 |
@@ -0,0 +1,673 @@
+#!/usr/bin/python%0Aimport mraa # For accessing the GPIO%0Aimport time # For sleeping between blinks%0A%0A%0ALED_GPIO = 5 # we are using D5 pin%0AblinkLed = mraa.Gpio(LED_GPIO) # Get the LED pin object%0AblinkLed.dir(mraa.DIR_OUT) # Set the direction as output%0AledState = False # LED is off to begin with%0AblinkLed.write(0)%0A# One infinite loop coming up%0Awhile True:%0A if ledState == False:%0A # LED is off, turn it on%0A blinkLed.write(1)%0A ledState = True # LED is on%0A else:%0A blinkLed.write(0)%0A ledState = False%0A print %22LED is on? %5CnAns: %25s%22 %25(ledState)%0A # Wait for some time %0A time.sleep(1)%0A
|
|
56b5b0d9f1fd420e2ea7cdb0654d5c2f9d637189
|
Add light theme
|
themes/light.py
|
themes/light.py
|
Python
| 0.000001 |
@@ -0,0 +1,886 @@
+from utils import colors%0A%0A%0A# Segments colors.%0AUSERATHOST_BG = colors.SMERALD%0AUSERATHOST_FG = colors.WHITE%0A%0ASSH_BG = colors.LIGHT_ORANGE%0ASSH_FG = colors.WHITE%0A%0ACURRENTDIR_BG = colors.MID_GREY%0ACURRENTDIR_FG = colors.LIGHT_GREY%0A%0AREADONLY_BG = colors.LIGHT_GREY%0AREADONLY_FG = colors.RED%0A%0AEXITCODE_BG = colors.RED%0AEXITCODE_FG = colors.WHITE%0A%0APADDING_BG = colors.WHITE%0A%0AGIT_UNTRACKED_FILES_BG = colors.PINKISH_RED%0AGIT_UNTRACKED_FILES_FG = colors.NEARLY_WHITE_GREY%0AGIT_CHANGES_NOT_STAGED_BG = colors.PINKISH_RED%0AGIT_CHANGES_NOT_STAGED_FG = colors.NEARLY_WHITE_GREY%0AGIT_ALL_CHANGES_STAGED_BG = colors.LIGHT_ORANGE%0AGIT_ALL_CHANGES_STAGED_FG = colors.DARKER_GREY%0AGIT_CLEAN_BG = colors.PISTACHIO%0AGIT_CLEAN_FG = colors.DARKER_GREY%0A%0AVENV_BG = colors.SMERALD%0AVENV_FG = colors.EXTRA_LIGHT_GREY%0A%0AJOBS_BG = colors.DARK_PURPLE%0AJOBS_FG = colors.WHITE%0A%0ATIME_BG = colors.LIGHT_GREY%0ATIME_FG = colors.MID_GREY
|
|
091e6a4450641ccc28dcd82e5225067425160cc2
|
Replace follow question by "want answers"
|
quora/pyquora.py
|
quora/pyquora.py
|
import requests
from bs4 import BeautifulSoup
import feedparser
import re
### Configuration ###
POSSIBLE_FEED_KEYS = ['link', 'id', 'published', 'title', 'summary']
### Enumerated Types ###
def enum(*sequential, **named):
enums = dict(zip(sequential, range(len(sequential))), **named)
reverse = dict((value, key) for key, value in enums.iteritems())
enums['reverse_mapping'] = reverse
return type('Enum', (), enums)
ACTIVITY_ITEM_TYPES = enum(UPVOTE=1, USER_FOLLOW=2, QUESTION_FOLLOW=3, ANSWER=4, QUESTION=5, REVIEW_REQUEST=6)
####################################################################
# Helpers
####################################################################
def try_cast(s):
try:
return int(s)
except ValueError:
return s
def get_name(source):
return str(source.find('span', attrs={'class' : 'user'}).string)
def get_count(element):
return try_cast(element.find('span', class_='profile-tab-count').string.replace(',', ''))
def get_count_for_user_href(soup, user, suffix):
return get_count(soup.find('a', class_='link_label', href='/' + user + '/' + suffix))
def build_feed_item(item):
dict = {}
keys = POSSIBLE_FEED_KEYS
for key in keys:
if key in item.keys():
dict[key] = item[key]
return dict
def check_activity_type(description):
soup = BeautifulSoup(description)
tag = soup.find('div', style="color: #666666;")
if tag is not None:
if 'voted up this' in tag.string:
return ACTIVITY_ITEM_TYPES.UPVOTE
elif 'followed a question' in tag.string:
return ACTIVITY_ITEM_TYPES.QUESTION_FOLLOW
elif 'added this answer' in tag.string:
return ACTIVITY_ITEM_TYPES.ANSWER
elif 'added a question' in tag.string:
return ACTIVITY_ITEM_TYPES.QUESTION
elif 'requested reviews.' in tag.string:
return ACTIVITY_ITEM_TYPES.REVIEW_REQUEST
else: # hopefully.
return ACTIVITY_ITEM_TYPES.USER_FOLLOW
def is_new_ui(soup):
return soup.find('div', attrs={'class': 'ProfileTabs'}) is not None
####################################################################
# API
####################################################################
class Quora:
@staticmethod
def get_user_stats(user):
soup = BeautifulSoup(requests.get('http://www.quora.com/' + user).text)
data_stats = []
name = get_name(soup)
err = None
for item in soup.findAll('span', attrs={'class' : 'profile_count'}):
m = re.findall('\d', str(item))
element = ''.join(m)
data_stats.append(element)
user_dict = {'answers' : try_cast(data_stats[1]),
'blogs' : err,
'edits' : try_cast(data_stats[5]),
'followers' : try_cast(data_stats[3]),
'following' : try_cast(data_stats[4]),
'name' : name,
'posts' : try_cast(data_stats[2]),
'questions' : try_cast(data_stats[0]),
'topics' : err,
'username' : user }
return user_dict
@staticmethod
def get_user_activity(user):
f = feedparser.parse('http://www.quora.com/' + user + '/rss')
dict = {
'username': user,
'last_updated': f.feed.updated
}
for entry in f.entries:
if 'activity' not in dict.keys():
dict['activity'] = []
dict['activity'].append(build_feed_item(entry))
return dict
@staticmethod
def get_activity(user):
f = feedparser.parse('http://www.quora.com/' + user + '/rss')
activity = Activity()
for entry in f.entries:
type = check_activity_type(entry['description'])
if type is not None:
if type == ACTIVITY_ITEM_TYPES.UPVOTE:
activity.upvotes.append(build_feed_item(entry))
elif type == ACTIVITY_ITEM_TYPES.USER_FOLLOW:
activity.user_follows.append(build_feed_item(entry))
elif type == ACTIVITY_ITEM_TYPES.QUESTION_FOLLOW:
activity.question_follows.append(build_feed_item(entry))
elif type == ACTIVITY_ITEM_TYPES.ANSWER:
activity.answers.append(build_feed_item(entry))
elif type == ACTIVITY_ITEM_TYPES.QUESTION:
activity.questions.append(build_feed_item(entry))
elif type == ACTIVITY_ITEM_TYPES.REVIEW_REQUEST:
activity.review_requests.append(build_feed_item(entry))
return activity
@staticmethod
def get_activity_keys():
return POSSIBLE_FEED_KEYS
class Activity:
def __init__(self, upvotes=[], user_follows=[], question_follows=[], answers=[], questions=[], review_requests=[]):
self.upvotes = upvotes
self.user_follows = user_follows
self.question_follows = question_follows
self.answers = answers
self.questions = questions
self.review_requests = review_requests
|
Python
| 0.999999 |
@@ -480,31 +480,27 @@
LLOW=2,
-QUESTION_FOLLOW
+WANT_ANSWER
=3, ANSW
@@ -1627,31 +1627,27 @@
M_TYPES.
-QUESTION_FOLLOW
+WANT_ANSWER
%0A
@@ -4209,23 +4209,19 @@
PES.
-QUESTION_FOLLOW
+WANT_ANSWER
:%0A
@@ -4247,31 +4247,27 @@
ctivity.
-question_follow
+want_answer
s.append
@@ -4861,31 +4861,27 @@
ows=%5B%5D,
-question_follow
+want_answer
s=%5B%5D, an
|
878d0b793c8efa1dafc246326d519685032ee9a7
|
Add new package: influxdb (#17909)
|
var/spack/repos/builtin/packages/influxdb/package.py
|
var/spack/repos/builtin/packages/influxdb/package.py
|
Python
| 0 |
@@ -0,0 +1,1693 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0Aimport platform%0A%0A_versions = %7B%0A '1.8.1': %7B%0A 'Linux_amd64': ('64e60e438ac8a8fdacc6623f238c40bffae31c795642146d70eb316533d3d70f', 'https://dl.influxdata.com/influxdb/releases/influxdb-1.8.1-static_linux_amd64.tar.gz'),%0A 'Linux_aarch64': ('fd5d7c962827ab1ccae27f6504595fdcd30c20d505b8e07d8573e274824e1366', 'https://dl.influxdata.com/influxdb/releases/influxdb-1.8.1_linux_arm64.tar.gz')%7D,%0A '1.8.0': %7B%0A 'Linux_amd64': ('aedc5083ae2e61ef374dbde5044ec2a5b27300e73eb92ccd135e6ff9844617e2', 'https://dl.influxdata.com/influxdb/releases/influxdb-1.8.0-static_linux_amd64.tar.gz'),%0A 'Linux_aarch64': ('e76c36c10e46c2fd17820156b290dd776a465da0298496af5d490e555504b079', 'https://dl.influxdata.com/influxdb/releases/influxdb-1.8.0_linux_arm64.tar.gz')%7D,%0A%7D%0A%0A%0Aclass Influxdb(Package):%0A %22%22%22InfluxDB is an open source time series platform.%22%22%22%0A%0A homepage = %22https://influxdata.com/%22%0A url = %22https://dl.influxdata.com/influxdb/releases/influxdb-1.8.1-static_linux_amd64.tar.gz%22%0A%0A for ver, packages in _versions.items():%0A key = %22%7B0%7D_%7B1%7D%22.format(platform.system(), platform.machine())%0A pkg = packages.get(key)%0A if pkg:%0A version(ver, sha256=pkg%5B0%5D, url=pkg%5B1%5D)%0A%0A def setup_run_environment(self, env):%0A env.prepend_path('PATH', self.prefix.usr.bin)%0A%0A def install(self, spec, prefix):%0A install_tree('usr', prefix)%0A install_tree('etc', prefix.etc)%0A install_tree('var', prefix.var)%0A
|
|
c1117f05934f76da72652d574aa240245d4724ff
|
new package (#6874)
|
var/spack/repos/builtin/packages/opa-psm2/package.py
|
var/spack/repos/builtin/packages/opa-psm2/package.py
|
Python
| 0.618644 |
@@ -0,0 +1,2468 @@
+##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass OpaPsm2(MakefilePackage):%0A %22%22%22 Intel Omni-Path Performance Scaled Messaging 2 (PSM2) library%22%22%22%0A%0A homepage = %22http://github.com/01org/opa-psm2%22%0A url = %22https://github.com/01org/opa-psm2/archive/PSM2_10.3-8.tar.gz%22%0A%0A version('10.3-37', '9bfca04f29b937b3856f893e1f8b1b60')%0A version('10.3-17', 'e7263eb449939cb87612e2c7623ca21c')%0A version('10.3-10', '59d36b49eb126f980f3272a9d66a8e98')%0A version('10.3-8', '07bc5cb2a6bf1189a29cbea836843db2')%0A version('10.2-260', '71df31b5776be64ff243417ac88eec66')%0A version('10.2-235', '23539f725a597bf2d35aac47a793a37b')%0A version('10.2-175', 'c542b8641ad573f08f61d0a6a70f4013')%0A%0A 
depends_on('numactl')%0A%0A def setup_environment(self, spack_env, run_env):%0A spack_env.set('DESTDIR', self.prefix)%0A run_env.prepend_path('CPATH',%0A join_path(self.prefix, 'usr', 'include'))%0A run_env.prepend_path('LIBRARY_PATH',%0A join_path(self.prefix, 'usr', 'lib64'))%0A run_env.prepend_path('LD_LIBRARY_PATH',%0A join_path(self.prefix, 'usr', 'lib64'))%0A%0A def install(self, spec, prefix):%0A make('--environment-overrides', 'install')%0A
|
|
60e64bac0a3cadccd0f35b5c8e8770b4edf0afff
|
add new package (#21793)
|
var/spack/repos/builtin/packages/virtuoso/package.py
|
var/spack/repos/builtin/packages/virtuoso/package.py
|
Python
| 0 |
@@ -0,0 +1,2429 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0A%0Afrom spack import *%0A%0A%0Aclass Virtuoso(AutotoolsPackage):%0A %22%22%22Virtuoso is a high-performance and scalable Multi-Model RDBMS,%0A Data Integration Middleware, Linked Data Deployment, and HTTP%0A Application Server Platform%22%22%22%0A%0A homepage = %22https://github.com/openlink/virtuoso-opensource%22%0A git = %22https://github.com/openlink/virtuoso-opensource.git%22%0A%0A version('7.2.5.1-dev', commit='3ff1d4b3de3977337baf909c264968b9f70b7d2c')%0A%0A variant('dbpedia-vad', default=False, description='DBpedia vad package')%0A variant('demo-vad', default=False, description='Demo vad package')%0A variant('fct-vad', default=True, description='Facet Browser vad package')%0A variant('ods-vad', default=True, description='ODS vad package')%0A variant('sparqldemo-vad', default=False, description='Sparql Demo vad package')%0A variant('tutorial-vad', default=False, description='Tutorial vad package')%0A variant('isparql-vad', default=True, description='iSPARQL vad package')%0A variant('rdfmappers-vad', default=True, description='RDF Mappers vad package')%0A%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('m4', type='build')%0A%0A depends_on('bison', type='build')%0A depends_on('flex', type='build')%0A depends_on('gperf')%0A depends_on('readline')%0A depends_on('[email protected]:1.1.99')%0A%0A def autoreconf(self, spec, prefix):%0A bash = which('bash')%0A bash('./autogen.sh')%0A%0A def configure_args(self):%0A readlinep = self.spec%5B'readline'%5D.prefix.lib%0A args = %5B'--with-layout=opt',%0A '--program-transform-name=s/isql/isql-v/',%0A '--with-readline=%7B0%7D'.format(readlinep)%0A %5D%0A args.extend(self.enable_or_disable('dbpedia-vad'))%0A 
args.extend(self.enable_or_disable('demo-vad'))%0A args.extend(self.enable_or_disable('fct-vad'))%0A args.extend(self.enable_or_disable('ods-vad'))%0A args.extend(self.enable_or_disable('sparqldemo-vad'))%0A args.extend(self.enable_or_disable('tutorial-vad'))%0A args.extend(self.enable_or_disable('isparql-vad'))%0A args.extend(self.enable_or_disable('rdfmappers-vad'))%0A return args%0A
|
|
39b63523634801fe8ef2cca03e11b3875d84cdbd
|
Tweak syntax for f.close() concision, add typehints
|
flare/flare_io.py
|
flare/flare_io.py
|
from flare.struc import Structure
from typing import List
from json import dump, load
from flare.util import NumpyEncoder
def md_trajectory_to_file(filename, structures: List[Structure]):
"""
Take a list of structures and write them to a json file.
:param filename:
:param structures:
"""
f = open(filename, 'w')
dump([s.as_dict() for s in structures], f, cls=NumpyEncoder)
f.close()
def md_trajectory_from_file(filename):
"""
Read a list of structures from a json file, formatted as in md_trajectory_to_file.
:param filename:
"""
f = open(filename, 'r')
structure_list = load(f)
structures = [Structure.from_dict(dictionary) for dictionary in structure_list]
return structures
|
Python
| 0 |
@@ -150,16 +150,21 @@
filename
+: str
, struct
@@ -286,35 +286,36 @@
ructures:%0A%09%22%22%22%0A%09
-f =
+with
open(filename,
@@ -318,17 +318,24 @@
me, 'w')
-%0A
+ as f:%0A%09
%09dump(%5Bs
@@ -391,19 +391,8 @@
der)
-%0A%09f.close()
%0A%0Ade
@@ -425,16 +425,21 @@
filename
+: str
):%0A%09%22%22%22%0A
@@ -550,11 +550,12 @@
%22%22%0A%09
-f =
+with
ope
@@ -570,17 +570,24 @@
me, 'r')
-%0A
+ as f:%0A%09
%09structu
@@ -604,16 +604,17 @@
load(f)%0A
+%09
%09structu
|
cb0d6124ea31e8fb9ff8957072a2b881b882127e
|
Add Timelapse script for sunrise timelapses
|
examples/hero9_timelapse_webcam.py
|
examples/hero9_timelapse_webcam.py
|
Python
| 0 |
@@ -0,0 +1,1400 @@
+import sys%0Aimport time%0Afrom goprocam import GoProCamera, constants%0Aimport threading%0Aimport logging%0A%0A%22%22%22%0AI use PM2 to start my GoPro cameras, using a Raspberry Pi 4, works perfectly.%0A%0Apm2 start timelapse.py --cron %2230 7 * * *%22 --log timelapse.log --no-autorestart%0A%0AThis script will overrride some settings for reliability:%0A%0AVoice control: OFF%0AAutoPower off: NEVER%0ABeeps: OFF (Do not want the camera beeping at 6AM)%0A%0ANightLapse configuration left untouched, I recommend always using Auto shutter for sunrise and locking the White Balance to 4000k or higher.%0A%22%22%22%0A%0A%0Adef start_timelapse(interface):%0A gopro = GoProCamera.GoPro(ip_address=GoProCamera.GoPro.getWebcamIP(%0A interface), camera=constants.gpcontrol, webcam_device=interface)%0A logging.info(%0A %22Started goprocam instance with interface %7B%7D%22.format(interface))%0A gopro.gpControlSet(constants.Setup.VOICE_CONTROL,%0A constants.Setup.VoiceControl.OFF)%0A gopro.gpControlSet(constants.Setup.AUTO_OFF, constants.Setup.AutoOff.Never)%0A logging.info(%22All config set%22)%0A gopro.mode(constants.Mode.MultiShotMode,%0A constants.Mode.SubMode.MultiShot.NightLapse)%0A gopro.shutter(constants.start)%0A logging.info(%22Started timelapse%22)%0A%0A%0Acameras = sys.argv%5B1%5D%0Acameras = cameras.split(%22,%22)%0A%0Afor interface in cameras:%0A thr = threading.Thread(target=start_timelapse, args=(interface,))%0A thr.start()%0A
|
|
9d63571c5add6ff2ff064f41c9bc97a6943a69e9
|
add missing migration
|
toucan/invitations/migrations/0002_auto_20161009_2158.py
|
toucan/invitations/migrations/0002_auto_20161009_2158.py
|
Python
| 0.000258 |
@@ -0,0 +1,604 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.2 on 2016-10-09 21:58%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('invitations', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='toucaninvitation',%0A options=%7B'ordering': %5B'-pk'%5D%7D,%0A ),%0A migrations.AlterField(%0A model_name='toucaninvitation',%0A name='invitation_sent',%0A field=models.DateTimeField(blank=True, null=True),%0A ),%0A %5D%0A
|
|
4651e178ddbeac9211f8170e2e20f8a35ff0e3ab
|
Add a simple CLI script for writing out results of scripts
|
ocradmin/plugins/test_nodetree.py
|
ocradmin/plugins/test_nodetree.py
|
Python
| 0 |
@@ -0,0 +1,1233 @@
+#!/usr/bin/python%0A%0Aimport os%0Aimport sys%0Aimport json%0Asys.path.append(os.path.abspath(%22..%22))%0Aos.environ%5B'DJANGO_SETTINGS_MODULE'%5D = 'ocradmin.settings'%0A%0Asys.path.insert(0, %22lib%22)%0A%0Afrom nodetree import script%0Afrom nodetree.manager import ModuleManager%0A%0Adef run(nodelist, outpath):%0A manager = ModuleManager()%0A manager.register_module(%22ocradmin.plugins.ocropus_nodes%22)%0A manager.register_module(%22ocradmin.plugins.tesseract_nodes%22)%0A manager.register_module(%22ocradmin.plugins.cuneiform_nodes%22)%0A manager.register_module(%22ocradmin.plugins.numpy_nodes%22)%0A manager.register_module(%22ocradmin.plugins.pil_nodes%22)%0A%0A s = script.Script(nodelist, manager=manager)%0A term = s.get_terminals()%5B0%5D%0A print %22Rendering to %25s%22 %25 outpath%0A out = manager.get_new_node(%22Ocropus::FileOut%22, label=%22Output%22,%0A params=%5B(%22path%22, os.path.abspath(outpath))%5D)%0A out.set_input(0, term)%0A out.eval()%0A%0Aif __name__ == %22__main__%22:%0A if len(sys.argv) %3C 3:%0A print %22Usage: %25s %3Cscript%3E %3Coutput%3E%22 %25 sys.argv%5B0%5D%0A sys.exit(1)%0A%0A nodes = None%0A with open(sys.argv%5B1%5D, %22r%22) as f:%0A nodes = json.load(f)%0A%0A if nodes is None:%0A print %22No nodes found in script%22%0A sys.exit(1)%0A%0A run(nodes, sys.argv%5B2%5D)%0A%0A
|
|
07c0ce9138f4e14c16e6223912d7fb53ab2db8ad
|
Version 4.1
|
files/agent-find-rwx.py
|
files/agent-find-rwx.py
|
Python
| 0 |
@@ -0,0 +1,296 @@
+for pid, procinfo in VmmPy_ProcessListInformation().items():%0A try:%0A memmap = VmmPy_ProcessGetMemoryMap(pid, True)%0A for entry in memmap:%0A if '-rwx' in entry%5B'flags'%5D:%0A print(str(pid) + ': ' + procinfo%5B'name'%5D + ': ' + str(entry))%0A except:%0A pass
|
|
74fec13ce5ca0f011c8970e7664727b422597d9a
|
Add tools.genetics module (#41, #36))
|
xentica/tools/genetics.py
|
xentica/tools/genetics.py
|
Python
| 0 |
@@ -0,0 +1,307 @@
+%22%22%22A collection of functions allowing genetics manipulations.%22%22%22%0A%0A%0Adef genome_crossover(*genomes):%0A %22%22%22%0A Crossover given genomes in stochastic way.%0A%0A :param genomes: A list of genomes (integers) to crossover%0A%0A :returns: Single integer, a resulting genome.%0A%0A %22%22%22%0A raise NotImplementedError%0A
|
|
5f0ebdb043a313a784c723c36b87d316e276629f
|
Add recurring events experiment
|
experiments/python/pygame_recurring_events.py
|
experiments/python/pygame_recurring_events.py
|
Python
| 0.000092 |
@@ -0,0 +1,365 @@
+import pygame%0Aimport pygame.joystick%0Afrom pygame.locals import *%0A%0Adef main():%0A pygame.init()%0A clock = pygame.time.Clock()%0A ue = pygame.event.Event(USEREVENT, %7B'code':'drop'%7D)%0A pygame.time.set_timer(127, 500)%0A while(True):%0A events = pygame.event.get()%0A for e in events:%0A print(e)%0A clock.tick(2)%0A%0Aif __name__ == %22__main__%22:%0A main()%0A
|
|
4d34906eba347f56a13b193efa3cedb3f2ab2a24
|
Add tests for ironic-dbsync.
|
ironic/tests/test_dbsync.py
|
ironic/tests/test_dbsync.py
|
Python
| 0 |
@@ -0,0 +1,1099 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A# -*- encoding: utf-8 -*-%0A#%0A# vim: tabstop=4 shiftwidth=4 softtabstop=4%0A#%0A# Copyright 2013 Hewlett-Packard Development Company, L.P.%0A# All Rights Reserved.%0A#%0A# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Afrom ironic.db import migration%0Afrom ironic.tests.db import base%0A%0A%0Aclass DbSyncTestCase(base.DbTestCase):%0A def setUp(self):%0A super(DbSyncTestCase, self).setUp()%0A%0A def test_sync_and_version(self):%0A migration.db_sync()%0A v = migration.db_version()%0A self.assertTrue(v %3E migration.INIT_VERSION)%0A
|
|
013ab6f195510a5097ba68c2e39277b154635bb7
|
Set up the IDE device correctly, ie. with and using legacy ports.
|
src/dev/x86/SouthBridge.py
|
src/dev/x86/SouthBridge.py
|
# Copyright (c) 2008 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
from m5.params import *
from m5.proxy import *
from Cmos import Cmos
from I8042 import I8042
from I82094AA import I82094AA
from I8237 import I8237
from I8254 import I8254
from I8259 import I8259
from Ide import IdeController
from PcSpeaker import PcSpeaker
from X86IntPin import X86IntLine
from m5.SimObject import SimObject
def x86IOAddress(port):
IO_address_space_base = 0x8000000000000000
return IO_address_space_base + port;
class SouthBridge(SimObject):
    """Aggregates the legacy PC south-bridge devices (PICs, PIT, CMOS/RTC,
    DMA controller, keyboard controller, PC speaker, I/O APIC and IDE
    controller) and wires their interrupt pins and PIO ports together.
    """
    type = 'SouthBridge'
    pio_latency = Param.Latency('1ns', "Programmed IO latency in simticks")
    platform = Param.Platform(Parent.any, "Platform this device is part of")

    # Default child devices, placed at the standard legacy PC I/O ports.
    _pic1 = I8259(pio_addr=x86IOAddress(0x20), mode='I8259Master')
    _pic2 = I8259(pio_addr=x86IOAddress(0xA0), mode='I8259Slave')
    _cmos = Cmos(pio_addr=x86IOAddress(0x70))
    _dma1 = I8237(pio_addr=x86IOAddress(0x0))
    _keyboard = I8042(data_port=x86IOAddress(0x60), \
            command_port=x86IOAddress(0x64))
    _pit = I8254(pio_addr=x86IOAddress(0x40))
    _speaker = PcSpeaker(pio_addr=x86IOAddress(0x61))
    _io_apic = I82094AA(pio_addr=0xFEC00000)

    # This is to make sure the interrupt lines are instantiated. Don't use
    # it for anything directly.
    int_lines = VectorParam.X86IntLine([], "Interrupt lines")

    # Public parameters exposing the default devices above.
    pic1 = Param.I8259(_pic1, "Master PIC")
    pic2 = Param.I8259(_pic2, "Slave PIC")
    cmos = Param.Cmos(_cmos, "CMOS memory and real time clock device")
    dma1 = Param.I8237(_dma1, "The first dma controller")
    keyboard = Param.I8042(_keyboard, "The keyboard controller")
    pit = Param.I8254(_pit, "Programmable interval timer")
    speaker = Param.PcSpeaker(_speaker, "PC speaker")
    io_apic = Param.I82094AA(_io_apic, "I/O APIC")

    def connectPins(self, source, sink):
        # Instantiate an interrupt line connecting source to sink and keep
        # it alive via the int_lines vector parameter.
        self.int_lines.append(X86IntLine(source=source, sink=sink))

    # IDE controller configured with the legacy primary/secondary channel
    # I/O port BARs and IRQ 14.
    ide = IdeController(disks=[], pci_func=0, pci_dev=4, pci_bus=0)
    ide.BAR0 = 0x1f0
    ide.BAR0LegacyIO = True
    ide.BAR1 = 0x3f4
    ide.BAR1Size = '3B'
    ide.BAR1LegacyIO = True
    ide.BAR2 = 0x170
    ide.BAR2LegacyIO = True
    ide.BAR3 = 0x374
    ide.BAR3Size = '3B'
    ide.BAR3LegacyIO = True
    ide.BAR4 = 1
    ide.Command = 1
    ide.InterruptLine = 14
    ide.InterruptPin = 1

    def attachIO(self, bus):
        """Route the interrupt signals between the devices and connect
        every device's PIO port to the given bus."""
        # Route interrupt signals
        self.connectPins(self.pic1.output, self.io_apic.pin(0))
        self.connectPins(self.pic2.output, self.pic1.pin(2))
        self.connectPins(self.cmos.int_pin, self.pic2.pin(0))
        self.connectPins(self.pit.int_pin, self.pic1.pin(0))
        self.connectPins(self.pit.int_pin, self.io_apic.pin(2))
        # Keyboard/mouse interrupts go to the I/O APIC only; the PIC
        # routings are kept here commented out for reference.
#        self.connectPins(self.keyboard.keyboard_int_pin,
#                         self.pic1.pin(1))
        self.connectPins(self.keyboard.keyboard_int_pin,
                         self.io_apic.pin(1))
#        self.connectPins(self.keyboard.mouse_int_pin,
#                         self.pic2.pin(4))
        self.connectPins(self.keyboard.mouse_int_pin,
                         self.io_apic.pin(12))
        # Tell the devices about each other
        self.pic1.slave = self.pic2
        self.speaker.i8254 = self.pit
        self.io_apic.external_int_pic = self.pic1
        # Connect to the bus
        self.cmos.pio = bus.port
        self.dma1.pio = bus.port
        self.ide.pio = bus.port
        self.keyboard.pio = bus.port
        self.pic1.pio = bus.port
        self.pic2.pio = bus.port
        self.pit.pio = bus.port
        self.speaker.pio = bus.port
        self.io_apic.pio = bus.port
        self.io_apic.int_port = bus.port
|
Python
| 0 |
@@ -3776,17 +3776,39 @@
mmand =
-1
+0%0A ide.ProgIF = 0x80
%0A ide
|
3498c5e2c778c924397af8124544c2ce230e4d46
|
reorder contribute form (bug 744631)
|
apps/mozorg/forms.py
|
apps/mozorg/forms.py
|
from django import forms
from django.forms import widgets
from django.utils.safestring import mark_safe
from django.core.validators import EMPTY_VALUES
from product_details import product_details
# Newsletter delivery formats offered to subscribers: HTML or plain text.
FORMATS = (('H', 'HTML'), ('T', 'Text'))
class SideRadios(widgets.RadioFieldRenderer):
    """Renderer that emits the radio inputs side by side as labels."""

    def render(self):
        # Concatenate the markup of every radio widget with no separators.
        pieces = []
        for widget in self:
            pieces.append(unicode(widget))
        return mark_safe(''.join(pieces))
class PrivacyWidget(widgets.CheckboxInput):
    """Checkbox rendered together with the standard privacy-policy text.

    Lots of pages need this combination, so it is standardized here.
    """

    def render(self, name, value, attrs=None):
        # Mark the checkbox as required before rendering the base widget.
        attrs['required'] = 'true'
        checkbox_html = super(PrivacyWidget, self).render(name, value, attrs)
        markup = ('<label for="%s" class="privacy-check-label">'
                  '%s '
                  '<span class="title">I agree to the '
                  '<a href="/en-US/privacy-policy">Privacy Policy</a>'
                  '</span></label>'
                  % (attrs['id'], checkbox_html))
        return mark_safe(markup)
class EmailInput(widgets.TextInput):
    # Text input rendered as an HTML5 <input type="email"> element.
    input_type = 'email'
class NewsletterForm(forms.Form):
    """Newsletter sign-up form: email address, mail format choice and a
    mandatory privacy agreement."""
    email = forms.EmailField(widget=EmailInput(attrs={'required':'true'}))
    # Side-by-side radio buttons for HTML vs. plain text; HTML preselected.
    fmt = forms.ChoiceField(widget=forms.RadioSelect(renderer=SideRadios),
                            choices=FORMATS,
                            initial='H')
    privacy = forms.BooleanField(widget=PrivacyWidget)
class NewsletterCountryForm(NewsletterForm):
    """Newsletter form with an extra country selector whose initial value
    is derived from the request locale."""

    def __init__(self, locale, *args, **kwargs):
        # Region choices sorted by their localized display name.
        region_map = product_details.get_regions(locale)
        choices = sorted(region_map.iteritems(), key=lambda item: item[1])
        # Reduce e.g. 'en-US' to just the country part, lowercased.
        country = locale.lower()
        if '-' in country:
            country = country.split('-')[1]
        super(NewsletterCountryForm, self).__init__(*args, **kwargs)
        self.fields['country'] = forms.ChoiceField(choices=choices,
                                                   initial=country)
# (value, label) choices for ContributeForm.interest. The empty-string
# value is the placeholder entry; the single-space value means "Other".
INTEREST_CHOICES = (('', 'Area of interest?'),
                    ('Support', 'Helping Users'),
                    ('Localization', 'Localization'),
                    ('QA', 'Testing and QA'),
                    ('Coding', 'Coding'),
                    ('Add-ons', 'Add-ons'),
                    ('Marketing', 'Marketing'),
                    ('Students', 'Student Reps'),
                    ('Webdev', 'Web Development'),
                    ('Documentation', 'Developer Documentation'),
                    ('IT', 'Systems Administration'),
                    ('Research', 'User Research'),
                    ('Thunderbird', 'Thunderbird'),
                    ('Accessibility', 'Accessibility'),
                    ('Firefox Suggestions', 'I have a suggestion for Firefox'),
                    ('Firefox Issue', 'I need help with a Firefox issue'),
                    (' ', 'Other'))
class ContributeForm(forms.Form):
    """Get-involved contact form: email, privacy consent, optional
    newsletter opt-in, area of interest and free-form comments."""
    email = forms.EmailField(widget=EmailInput(attrs={'required':'true'}))
    privacy = forms.BooleanField(widget=PrivacyWidget)
    newsletter = forms.BooleanField(required=False)
    interest = forms.ChoiceField(choices=INTEREST_CHOICES)
    comments = forms.CharField(widget=forms.widgets.Textarea(attrs={'required':'true', 'rows':'', 'cols':''}))
|
Python
| 0 |
@@ -2678,24 +2678,60 @@
sibility'),%0A
+ (' ', 'Other'),%0A
@@ -2875,44 +2875,8 @@
ue')
-,%0A (' ', 'Other')
)%0A%0Ac
|
71849714d809e882838e109d6906086b47fe68c5
|
Create hubspot-amulet.py
|
CiO/hubspot-amulet.py
|
CiO/hubspot-amulet.py
|
Python
| 0 |
@@ -0,0 +1,299 @@
+import itertools%0A%0A%0Adef checkio(m):%0A for a, b, c in itertools.product(range(-180,181), repeat=3):%0A if not (a + b*m%5B1%5D%5B0%5D + c*m%5B2%5D%5B0%5D)%25360:%0A if (a*m%5B0%5D%5B1%5D + b + c*m%5B2%5D%5B1%5D)%25360 == 225:%0A if (a*m%5B0%5D%5B2%5D + b*m%5B1%5D%5B2%5D + c)%25360 == 315:%0A return %5Ba, b, c%5D%0A
|
|
5156c590b43f5a2bf577da224829c20bc3b97230
|
add first example
|
libs/utils/filters.py
|
libs/utils/filters.py
|
Python
| 0 |
@@ -0,0 +1,1517 @@
+%0A# import glob%0A# import matplotlib.gridspec as gridspec%0A# import matplotlib.pyplot as plt%0A# import numpy as np%0A# import os%0A# import pandas as pd%0A# import pylab as pl%0A# import re%0A# import sys%0A# import trappy%0A%0A# Configure logging%0Aimport logging%0A%0Aclass Filters(object):%0A%0A def __init__(self, trace, tasks=None):%0A self.trace = trace%0A self.tasks = tasks%0A%0A def topBigTasks(self, max_tasks=10, min_samples=100, min_utilization=None):%0A %22%22%22%0A Tasks which had a 'utilization' bigger than the specified threshold%0A %22%22%22%0A%0A if min_utilization is None:%0A min_utilization = self.trace.platform%5B'nrg_model'%5D%5B'little'%5D%5B'cpu'%5D%5B'cap_max'%5D%0A%0A df = self.trace.df('tload')%0A big_tasks_events = df%5Bdf.utilization %3E min_utilization%5D%0A big_tasks = big_tasks_events.pid.unique()%0A%0A big_tasks_count = big_tasks.size%0A print 'Total %7B%7D tasks with at least %7B%7D %22utilization%22 samples %3E %7B%7D'%5C%0A .format(big_tasks_count, min_samples, min_utilization)%0A%0A big_tasks_stats = big_tasks_events.groupby('pid')%5C%0A .describe(include=%5B'object'%5D);%0A big_tasks_pids = big_tasks_stats.unstack()%5B'comm'%5D%5C%0A .sort(columns=%5B'count'%5D, ascending=False)%0A big_tasks_pids = big_tasks_pids%5Bbig_tasks_pids%5B'count'%5D %3E min_samples%5D%0A%0A big_topmost = big_tasks_pids.head(max_tasks)%0A print 'Top %7B%7D %22big%22 tasks:'.format(max_tasks)%0A print big_topmost%0A%0A return list(big_topmost.index)%0A%0A
|
|
4137fd528367cfc5caf8c89665e1d2ee0ceb8385
|
Clean function cleans data correctly
|
Projects/Tracking/clean.py
|
Projects/Tracking/clean.py
|
Python
| 0.000004 |
@@ -0,0 +1,1423 @@
+import pandas as pd%0Afrom Code.config import get_path%0A%0A%0Adef clean_ble(df):%0A%0A PATH = get_path()%0A%0A # Remove unwanted columns%0A df = df.drop(%5B'clientmac', 'proximity', 'type', 'probetime_gmt', 'probetime'%5D, axis=1)%0A # Rename column headers%0A df.columns=%5B'id', 'datetime', 'sensor', 'power', 'rssi', 'accuracy'%5D%0A # Merge with stand locations%0A sensor_stand_loc = pd.merge(%0A pd.DataFrame(pd.read_csv(PATH.replace('BLE/Data', 'Location/', 1) + '/stand_locations.txt',%0A sep='%5Ct')),%0A pd.DataFrame(pd.read_csv(PATH.replace('BLE/Data', 'Location/', 1) + '/sensor_locations.txt',%0A sep='%5Ct')),%0A left_on='id',%0A right_on='id_location').drop('id', axis=1)%0A # Merge with location data%0A df = pd.DataFrame(pd.merge(df,%0A sensor_stand_loc,%0A left_on='sensor',%0A right_on='name',%0A how='outer').drop(%5B'name', 'type'%5D, axis=1))%0A # Map IDs to enumerated%0A map_id = %7Bid: i for i, id in enumerate(set(df%5B'id'%5D))%7D%0A df%5B'id'%5D = df%5B'id'%5D.map(map_id)%0A # Map Sensors to enumerated%0A map_sensors = %7Bsensor: i for i, sensor in enumerate(set(df%5B'sensor'%5D))%7D%0A df%5B'sensor'%5D = df%5B'sensor'%5D.map(map_sensors)%0A # Map datetime strings to datetime%0A df%5B'datetime'%5D = pd.to_datetime(df%5B'datetime'%5D)%0A return df%0A
|
|
c0cf6739d3b10868eaae246c36eb691bff42a5aa
|
Add webdriver base test suite.
|
QA/pycopia/QA/webdriver.py
|
QA/pycopia/QA/webdriver.py
|
Python
| 0 |
@@ -0,0 +1,1549 @@
+#!/usr/bin/python2.5%0A# -*- coding: us-ascii -*-%0A# vim:ts=4:sw=4:softtabstop=4:smarttab:expandtab%0A#%0A# Copyright (C) 2010 Keith Dart %[email protected]%3E%0A#%0A# This library is free software; you can redistribute it and/or%0A# modify it under the terms of the GNU Lesser General Public%0A# License as published by the Free Software Foundation; either%0A# version 2.1 of the License, or (at your option) any later version.%0A#%0A# This library is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU%0A# Lesser General Public License for more details.%0A%0Afrom __future__ import absolute_import%0A%0A%22%22%22%0ABase class for webdriver test suites. Pre-instantiates a webdriver%0Ainstance and fetches the DUT service target.%0A%22%22%22%0A%0A# TODO other target browser support%0Afrom selenium.firefox.webdriver import WebDriver%0A%0Afrom pycopia.QA import core%0A%0A%0Aclass WebdriverSuite(core.TestSuite):%0A %22%22%22Webdriver test suite. %0A%0A Add webdriver test cases to this suite. The %22webdriver%22 object will be%0A at the root of the configuration.%0A %22%22%22%0A%0A def initialize(self):%0A cf = self.config%0A target_url = cf.environment.DUT.get_url(cf.get(%22serviceprotocol%22), cf.get(%22servicepath%22))%0A self.info(%22Target URL is: %25s%22 %25 (target_url,))%0A cf.webdriver = WebDriver()%0A cf.webdriver.get(target_url)%0A%0A def finalize(self):%0A wd = self.config.webdriver%0A del self.config.webdriver%0A wd.quit()%0A%0A%0A
|
|
b1f22b33a60da2b27ac17090c0fa759e510dd051
|
Create rlmradio.py
|
rlmradio.py
|
rlmradio.py
|
Python
| 0.000001 |
@@ -0,0 +1,534 @@
+#Grabs Current Info for RLM Radio Stream%0A%0Aimport requests%0Aimport re%0Afrom cloudbot import hook%0Aimport urllib%0Aimport urllib.request%0A%0Aurl = %22http://38.135.36.125:7359/7.html%22%0A%0A%[email protected](%22rlmradio%22, autohelp=False)%0Adef rlmradio(text):%0A%0A url = %22http://38.135.36.125:7359/7.html%22%0A html = urllib.request.urlopen(url).read()%0A htmlout = html%5B28:-15%5D%0A pw_bytes = htmlout.decode(%22utf-8%22)%0A filtered = pw_bytes.replace(%22'%22, %22'%22)%0A filtered = %22Now on the RLM Radio Stream: %22 + filtered%0A out = filtered%0A return out%0A%0A
|
|
bc4b25b9c24ef0db58acbd1c8e24b3fee319314b
|
Solve challenge 13
|
Challenges/chall_13.py
|
Challenges/chall_13.py
|
Python
| 0.001318 |
@@ -0,0 +1,955 @@
+#!/Applications/anaconda/envs/Python3/bin%0A# Python challenge - 13%0A# http://www.pythonchallenge.com/pc/return/disproportional.html%0A# http://www.pythonchallenge.com/pc/phonebook.php%0A%0Aimport xmlrpc.client%0A%0Adef main():%0A '''%0A Hint: phone that evil%0A %3Carea shape=%22circle%22 coords=%22326,177,45%22 href=%22../phonebook.php%22%3E%0A '''%0A xml_string = '%3CmethodResponse%3E%3Cfault%3E%3Cvalue%3E%3Cstruct%3E%3Cmember%3E%3Cname%3EfaultCode%3C/name%3E%3Cvalue%3E%3Cint%3E105%3C/int%3E%3C/value%3E%3C/member%3E%3Cmember%3E%3Cname%3EfaultString%3C/name%3E%3Cvalue%3E%3Cstring%3EXML error: Invalid document end at line 1, column 1%3C/string%3E%3C/value%3E%3C/member%3E%3C/struct%3E%3C/value%3E%3C/fault%3E%3C/methodResponse%3E'%0A server_url = 'http://www.pythonchallenge.com/pc/phonebook.php'%0A%0A with xmlrpc.client.ServerProxy(server_url) as server_proxy:%0A try:%0A print(server_proxy.phone('Bert')) # 555-ITALY%0A except Exception as e:%0A print('Error', e)%0A%0A return 0%0A%0A# Keyword: italy%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
78ea9019850dbf9b88d3f65a4a61139f01d2c496
|
Add scratch genbank-gff-to-nquads.py which just opens a gff file atm
|
src/genbank-gff-to-nquads.py
|
src/genbank-gff-to-nquads.py
|
Python
| 0 |
@@ -0,0 +1,230 @@
+#!/usr/bin/env python%0A%0Aimport jargparse%0A%0Aparser = jargparse.ArgParser('Convert Genbank GFF into an n-quad file')%0Aparser.add_argument('gffPath', help='path to the GFF')%0Aargs = parser.parse_args()%0A%0Awith open(args.gffPath):%0A pass%0A
|
|
48b724d7a2163c50be60d98933132b51347940bd
|
Create longest-line-of-consecutive-one-in-a-matrix.py
|
Python/longest-line-of-consecutive-one-in-a-matrix.py
|
Python/longest-line-of-consecutive-one-in-a-matrix.py
|
Python
| 0.999548 |
@@ -0,0 +1,853 @@
+# Time: O(m * n)%0A# Space: O(n)%0A%0Aclass Solution(object):%0A def longestLine(self, M):%0A %22%22%22%0A :type M: List%5BList%5Bint%5D%5D%0A :rtype: int%0A %22%22%22%0A if not M: return 0%0A result = 0%0A dp = %5B%5B%5B0%5D * 4 for _ in xrange(len(M%5B0%5D))%5D for _ in xrange(2)%5D%0A for i in xrange(len(M)):%0A for j in xrange(len(M%5B0%5D)):%0A dp%5Bi %25 2%5D%5Bj%5D%5B:%5D = %5B0%5D * 4%0A if M%5Bi%5D%5Bj%5D == 1:%0A dp%5Bi %25 2%5D%5Bj%5D%5B0%5D = dp%5Bi %25 2%5D%5Bj - 1%5D%5B0%5D+1 if j %3E 0 else 1%0A dp%5Bi %25 2%5D%5Bj%5D%5B1%5D = dp%5B(i-1) %25 2%5D%5Bj%5D%5B1%5D+1 if i %3E 0 else 1%0A dp%5Bi %25 2%5D%5Bj%5D%5B2%5D = dp%5B(i-1) %25 2%5D%5Bj-1%5D%5B2%5D+1 if (i %3E 0 and j %3E 0) else 1%0A dp%5Bi %25 2%5D%5Bj%5D%5B3%5D = dp%5B(i-1) %25 2%5D%5Bj+1%5D%5B3%5D+1 if (i %3E 0 and j %3C len(M%5B0%5D)-1) else 1%0A result = max(result, max(dp%5Bi %25 2%5D%5Bj%5D))%0A return result%0A
|
|
148b2d3c9025f234ecc3ede02c39cfcb6295f39a
|
1089. Duplicate Zeros
|
problems/test_1089_manual_copy.py
|
problems/test_1089_manual_copy.py
|
Python
| 0.999999 |
@@ -0,0 +1,957 @@
+import unittest%0Afrom typing import List%0A%0Aimport utils%0A%0A%0A# O(n) time. O(1) space. Array.%0Aclass Solution:%0A def duplicateZeros(self, arr: List%5Bint%5D) -%3E None:%0A %22%22%22%0A Do not return anything, modify arr in-place instead.%0A %22%22%22%0A num_zeros = sum(num == 0 for num in arr)%0A%0A if num_zeros == 0:%0A return%0A%0A hi = len(arr) + num_zeros - 1%0A%0A for lo in range(len(arr) - 1, -1, -1):%0A num = arr%5Blo%5D%0A%0A if hi %3C len(arr):%0A arr%5Bhi%5D = num%0A hi -= 1%0A%0A if num == 0:%0A if hi %3C len(arr):%0A arr%5Bhi%5D = 0%0A hi -= 1%0A%0A%0Aclass Test(unittest.TestCase):%0A def test(self):%0A cases = utils.load_test_json(__file__).test_cases%0A%0A for case in cases:%0A Solution().duplicateZeros(**case.args._asdict())%0A self.assertEqual(case.expected, case.args.arr)%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
23138ab91e5ac0ecf92a0968bf8e4abfa7d0c763
|
Remove duplicates in all subdirectories - working raw version.
|
removedups.py
|
removedups.py
|
Python
| 0 |
@@ -0,0 +1,2438 @@
+import hashlib, csv, os%0A%0Adef md5(fname):%0A hash_md5 = hashlib.md5()%0A with open(fname, %22rb%22) as f:%0A for chunk in iter(lambda: f.read(4096), b%22%22):%0A hash_md5.update(chunk)%0A return hash_md5.hexdigest()%0A%0Adef process_directory_csv(current_dir_fullpath, sub_dir_list, files, csvwriter):%0A for file in files:%0A full_name = current_dir_fullpath + '/' + file%0A # print(%22 %22 + full_name)%0A csvwriter.writerow(%5Bmd5(full_name), str(os.path.getsize(full_name)), full_name%5D)%0A%0Adef walk_all_subdirectories(path, output_file_name):%0A # count = 0%0A with open(output_file_name, %22w%22) as csvfile:%0A csvwriter = csv.writer(csvfile, delimiter=':', quotechar='%7C', quoting=csv.QUOTE_MINIMAL)%0A for current_dir, sub_dirs, files in os.walk(path):%0A print(current_dir)%0A process_directory_csv(current_dir, sub_dirs, files, csvwriter)%0A csvfile.flush()%0A # DEBUG CODE - process only 5 directories%0A # count += 1%0A # if count %3E= 10:%0A # csvfile.close()%0A # break;%0A csvfile.close()%0A%0Adef sort_file(inname, outname):%0A input_file = open(inname, %22r%22)%0A output_file = open(outname, %22w%22, 1)%0A lines = %5B%5D # give lines variable a type of list%0A for line in input_file:%0A lines.append(line)%0A lines.sort()%0A for line in lines:%0A output_file.write(line)%0A input_file.close()%0A output_file.close()%0A%0Adef generate_delete_commands(sortedfile, outname):%0A import csv%0A output_file = open(outname, %22w%22, 1)%0A previous_checksum = %22IMPOSSIBLE_CHECKSUM%22%0A with open(sortedfile) as f:%0A reader = csv.reader(f, delimiter=':', quotechar='%7C', quoting=csv.QUOTE_MINIMAL)%0A for row in reader:%0A # print(row%5B0%5D, row)%0A if previous_checksum == row%5B0%5D:%0A output_file.write(%22rm '%22 + row%5B2%5D + %22'%5Cn%22)%0A print(%22removing %22 + row%5B2%5D)%0A os.remove(row%5B2%5D)%0A previous_checksum = row%5B0%5D%0A f.close()%0A output_file.close()%0A%0A# Main program follows%0A%0Adirectory_name = %22..%22%0Aunsorted_file_name = 
%22filelist.csv%22%0Asorted_file_name = %22sortedfilelist.csv%22%0Adelete_command_file_name = %22deletecommands.sh%22%0A%0Aif __name__ == '__main__':%0A walk_all_subdirectories('..', unsorted_file_name)%0A sort_file(unsorted_file_name, sorted_file_name)%0A generate_delete_commands(sorted_file_name, delete_command_file_name)%0A%0A
|
|
59736ee4dd82da7f7945723ec1cc89b19359b5c7
|
Create LargestPrimeFactor.py
|
LargestPrimeFactor.py
|
LargestPrimeFactor.py
|
Python
| 0.999046 |
@@ -0,0 +1,382 @@
+#! Christian Ng%0Abase = 0%0Aprint(%22Enter an integer:%22)%0Abase = int(raw_input())%0Aprint %22Largest Factor is:%22%0A%0Awhile (base %25 2) == 0:%0A base = base/2%0A %0Aif base == 1 or base == -1:%0A%09print %222%22%0A%0Aincrement = 3%0A%0Awhile base != 1 and base != -1: %0A %0A while base %25 increment == 0: %0A base = base/increment%0A %0A %0A %0A increment = increment + 2%0A%0Aprint increment - 2%0A%0A
|
|
e4414527c2d09bb03fa2a1ca0bb6f001ce43bcf4
|
Upgrade Robot
|
lib/robot/writer/formatters.py
|
lib/robot/writer/formatters.py
|
# Copyright 2008-2011 Nokia Siemens Networks Oyj
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from robot.writer.tableformatters import SingleLineHtmlFormatter
from .tableformatters import (RowSplittingFormatter, SplittingHtmlFormatter,
ColumnAligner, SettingTableAligner, NameCell, HeaderCell, HtmlCell)
class _TestDataFileFormatter(object):
def variable_rows(self, variables):
for row in self._variable_table_formatter().format_simple_table(variables):
yield self._format_row(row)
def setting_rows(self, settings):
for row in self._setting_table_formatter().format_simple_table(settings):
yield self._format_row(row)
def test_rows(self, tests):
for row in self._test_table_formatter(tests).format_indented_table(tests):
yield self._format_row(row)
def keyword_rows(self, keywords):
for row in self._keyword_table_formatter(keywords).format_indented_table(keywords):
yield self._format_row(row)
def empty_row(self):
return self._format_row([])
def _format_row(self, row):
return row
class TsvFormatter(_TestDataFileFormatter):
    """Formatter for the TSV data format.

    Every table uses the same row-splitting formatter and every output
    row is padded to a fixed number of columns.
    """

    def __init__(self, cols=8):
        self._cols = cols
        self._formatter = RowSplittingFormatter(self._cols)

    def _variable_table_formatter(self):
        return self._formatter

    def _setting_table_formatter(self):
        return self._formatter

    def _test_table_formatter(self, tests):
        return self._formatter

    def _keyword_table_formatter(self, keywords):
        return self._formatter

    def header_row(self, table):
        # TSV headers are marked by surrounding each cell with asterisks.
        starred = ['*%s*' % cell for cell in table.header]
        return self._format_row(starred)

    def _format_row(self, row):
        return self._pad(row)

    def _pad(self, row):
        # Newlines cannot appear inside TSV cells, so flatten them first,
        # then right-pad the row with empty cells up to the column count.
        cells = [cell.replace('\n', ' ') for cell in row]
        missing = self._cols - len(cells)
        return cells + [''] * missing
class TxtFormatter(_TestDataFileFormatter):
    """Formatter for the space-separated plain text data format."""

    _FIRST_COL_WIDTH = 18
    _SETTING_NAME_WIDTH = 14
    _align_last_column = False

    def __init__(self, cols=8):
        self._cols = cols

    def _variable_table_formatter(self):
        return SettingTableAligner(self._cols, self._SETTING_NAME_WIDTH)

    def _setting_table_formatter(self):
        return SettingTableAligner(self._cols, self._SETTING_NAME_WIDTH)

    def _test_table_formatter(self, tests):
        return self._indented_table_formatter(tests)

    def _keyword_table_formatter(self, keywords):
        return self._indented_table_formatter(keywords)

    def header_row(self, table):
        first = '*** %s ***' % table.header[0]
        header = [first] + table.header[1:]
        if not self._should_align_columns(table):
            return header
        aligner = ColumnAligner(self._FIRST_COL_WIDTH, table,
                                self._align_last_column)
        return aligner.align_row(header)

    def _indented_table_formatter(self, table):
        # Tables with extra header columns are aligned column by column;
        # otherwise long rows are simply split.
        if self._should_align_columns(table):
            return ColumnAligner(self._FIRST_COL_WIDTH, table,
                                 self._align_last_column)
        return RowSplittingFormatter(self._cols)

    def _should_align_columns(self, table):
        return bool(table.header[1:])

    def _format_row(self, row):
        return self._escape(row)

    def _escape(self, row):
        row = self._escape_empty_cell_from_start(row)
        return self._escape_consecutive_whitespace(row)

    def _escape_empty_cell_from_start(self, row):
        # A row beginning with two empty cells would lose its leading
        # indentation on round-trip; escape the second cell.
        if len(row) >= 2 and row[0] == '' and row[1] == '':
            row[1] = '\\'
        return row

    def _escape_consecutive_whitespace(self, row):
        # Runs of whitespace would be parsed back as cell separators, so
        # insert a backslash between each whitespace character.
        escape = lambda match: '\\'.join(match.group(0))
        return [re.sub(r'\s\s+(?=[^\s])', escape, item.replace('\n', ' '))
                for item in row]
class PipeFormatter(TxtFormatter):
    """Formatter for the pipe-and-space separated plain text format."""

    _align_last_column = True

    def _escape(self, row):
        # Empty cells and literal pipes must be escaped so they are not
        # parsed back as cell separators.
        cells = self._format_empty_cells(row)
        cells = self._escape_pipes(cells)
        return self._escape_consecutive_whitespace(cells)

    def _format_empty_cells(self, row):
        return [cell if cell else '  ' for cell in row]

    def _escape_pipes(self, row):
        return [self._escape_pipes_from_cell(cell) for cell in row]

    def _escape_pipes_from_cell(self, cell):
        cell = cell.replace(' | ', ' \\| ')
        if cell.startswith('| '):
            cell = '\\' + cell
        if cell.endswith(' |'):
            cell = cell[:-1] + '\\|'
        return cell
class HtmlFormatter(_TestDataFileFormatter):
    """Formatter for the HTML data format.

    Note: ``self._cols`` is mutated by the per-table formatter methods,
    so ``empty_row`` reflects the width of the most recently formatted
    table.
    """

    def __init__(self):
        self._default_cols = 5
        self._cols = self._default_cols
        self._formatter = SplittingHtmlFormatter(self._default_cols)

    def empty_row(self):
        # One name cell followed by plain cells up to the current width.
        return [NameCell('')] + [HtmlCell('') for _ in range(self._cols-1)]

    def _setting_table_formatter(self):
        self._cols = self._default_cols
        return self._formatter

    def _variable_table_formatter(self):
        self._cols = self._default_cols
        return self._formatter

    def _test_table_formatter(self, tests):
        return self._dynamic_width_formatter(tests)

    def _keyword_table_formatter(self, keywords):
        return self._dynamic_width_formatter(keywords)

    def _dynamic_width_formatter(self, table):
        # Single-column headers fall back to the default fixed width;
        # otherwise the width grows to fit the widest row in the table.
        if len(table.header) == 1:
            self._cols = self._default_cols
            return SplittingHtmlFormatter(self._cols)
        self._cols = max(self._max_column_count(table), len(table.header))
        return SingleLineHtmlFormatter(self._cols)

    def header_row(self, table):
        if len(table.header) == 1:
            # A single header cell spans the whole default width.
            return [HeaderCell(table.header[0], self._default_cols)]
        headers = self._pad_header(table)
        return [HeaderCell(hdr) for hdr in headers]

    def _pad_header(self, table):
        return table.header + [''] * (self._max_column_count(table) - len(table.header))

    def _max_column_count(self, table):
        # Widest child row across all items, +1 for the name column.
        count = 0
        for item in table:
            for child in item:
                count = max(count, len(child.as_list()) + 1)
        return count
|
Python
| 0.000001 |
@@ -3664,16 +3664,59 @@
return
+table.type in %5B'test case', 'keyword'%5D and
bool(tab
|
99395e345f74bbedd29fd45eebe0738a3b5f4729
|
Test api endpoint for package show
|
ckanext/archiver/tests/test_api.py
|
ckanext/archiver/tests/test_api.py
|
Python
| 0 |
@@ -0,0 +1,1455 @@
+import pytest%0Aimport tempfile%0A%0Afrom ckan import model%0Afrom ckan import plugins%0Afrom ckan.tests import factories%0Aimport ckan.tests.helpers as helpers%0A%0Afrom ckanext.archiver import model as archiver_model%0Afrom ckanext.archiver.tasks import update_package%0A%0A%[email protected]('with_plugins')%[email protected]_config(%22ckanext-archiver.cache_url_root%22, %22http://localhost:50001/resources/%22)%[email protected]_config(%22ckanext-archiver.max_content_length%22, 1000000)%[email protected]_config(%22ckan.plugins%22, %22archiver testipipe%22)%0Aclass TestApi(object):%0A%0A @pytest.fixture(autouse=True)%0A @pytest.mark.usefixtures(u%22clean_db%22)%0A def initial_data(cls, clean_db):%0A archiver_model.init_tables(model.meta.engine)%0A cls.temp_dir = tempfile.mkdtemp()%0A%0A def test_package_show(self, client):%0A url = client + '/?status=200&content=test&content-type=csv'%0A testipipe = plugins.get_plugin('testipipe')%0A testipipe.reset()%0A%0A pkg_dict = %7B%0A 'name': 'test-package-api',%0A 'resources': %5B%0A %7B%0A 'url': url,%0A 'format': 'TXT',%0A 'description': 'Test'%0A %7D%0A %5D%0A %7D%0A pkg = factories.Dataset(**pkg_dict)%0A update_package(pkg%5B'id'%5D)%0A%0A result = helpers.call_action(%0A %22package_show%22,%0A id=pkg%5B%22id%22%5D%0A )%0A print(result)%0A assert 'archiver' in result.keys()%0A
|
|
070a5192e4473bbbbf25a881080413f771f05801
|
Add mockldap.ldap.functions taken from python-ldap
|
src/mockldap/ldap/functions.py
|
src/mockldap/ldap/functions.py
|
Python
| 0.000001 |
@@ -0,0 +1,601 @@
+import sys%0Afrom ldapobject import LDAPObject%0A%0Adef initialize(uri,trace_level=0,trace_file=sys.stdout,trace_stack_limit=None):%0A %22%22%22%0A Return LDAPObject instance by opening LDAP connection to%0A LDAP host specified by LDAP URL%0A%0A Parameters:%0A uri%0A LDAP URL containing at least connection scheme and hostport,%0A e.g. ldap://localhost:389%0A trace_level%0A If non-zero a trace output of LDAP calls is generated.%0A trace_file%0A File object where to write the trace output to.%0A Default is to use stdout.%0A %22%22%22%0A return LDAPObject(uri,trace_level,trace_file,trace_stack_limit)%0A
|
|
2f93251e77589c0edbb8e560940d29764caac9e0
|
Test password update functionality
|
tests/blueprints/authentication/test_views_password_update.py
|
tests/blueprints/authentication/test_views_password_update.py
|
Python
| 0 |
@@ -0,0 +1,2839 @@
+# -*- coding: utf-8 -*-%0A%0A%22%22%22%0A:Copyright: 2006-2016 Jochen Kupperschmidt%0A:License: Modified BSD, see LICENSE for details.%0A%22%22%22%0A%0Afrom byceps.services.authentication.password.models import Credential%0Afrom byceps.services.authentication.password import service as password_service%0Afrom byceps.services.authentication.session.models import SessionToken%0A%0Afrom tests.base import AbstractAppTestCase%0A%0Afrom testfixtures.user import create_user%0A%0A%0Aclass PasswordUpdateTestCase(AbstractAppTestCase):%0A%0A def test_when_logged_in_endpoint_is_available(self):%0A old_password = 'LekkerBratworsten'%0A new_password = 'EvenMoreSecure!!1'%0A%0A user = self.create_user(old_password)%0A%0A credential_before = self.find_credential(user.id)%0A self.assertIsNotNone(credential_before)%0A%0A session_token_before = self.find_session_token(user.id)%0A self.assertIsNotNone(session_token_before)%0A%0A form_data = %7B%0A 'old_password': old_password,%0A 'new_password': new_password,%0A 'new_password_confirmation': new_password,%0A %7D%0A%0A response = self.send_request(form_data, user=user)%0A%0A self.assertEqual(response.status_code, 302)%0A self.assertEqual(response.headers.get('Location'),%0A 'http://example.com/authentication/login')%0A%0A credential_after = self.find_credential(user.id)%0A session_token_after = self.find_session_token(user.id)%0A%0A self.assertIsNotNone(credential_after)%0A self.assertNotEqual(credential_before.password_hash,%0A credential_after.password_hash)%0A self.assertNotEqual(credential_before.updated_at,%0A credential_after.updated_at)%0A%0A self.assertIsNotNone(session_token_after)%0A self.assertNotEqual(session_token_before.token,%0A session_token_after.token)%0A self.assertNotEqual(session_token_before.created_at,%0A session_token_after.created_at)%0A%0A def test_when_not_logged_in_endpoint_is_unavailable(self):%0A form_data = %7B%7D%0A%0A response = self.send_request(form_data)%0A%0A self.assertEqual(response.status_code, 
404)%0A%0A # helpers%0A%0A def create_user(self, password):%0A user = create_user(8516)%0A%0A self.db.session.add(user)%0A self.db.session.commit()%0A%0A password_service.create_password_hash(user.id, password)%0A%0A return user%0A%0A def find_credential(self, user_id):%0A return Credential.query.get(user_id)%0A%0A def find_session_token(self, user_id):%0A return SessionToken.query %5C%0A .filter_by(user_id=user_id) %5C%0A .one()%0A%0A def send_request(self, form_data, *, user=None):%0A url = '/authentication/password'%0A with self.client(user=user) as client:%0A return client.post(url, data=form_data)%0A
|
|
0c59028a1ef33b3627e65955bafbf9b415c9bc34
|
Add 457_Circular_Array_Loop.py (#34)
|
python/457_Circular_Array_Loop.py
|
python/457_Circular_Array_Loop.py
|
Python
| 0 |
@@ -0,0 +1,1149 @@
+class Solution:%0A def circularArrayLoop(self, nums: List%5Bint%5D) -%3E bool:%0A for i in range(len(nums)):%0A if nums%5Bi%5D == 0:%0A continue%0A %0A # if slow and fast pointers collide, then there exists a loop%0A slow = i%0A fast = self.index(nums, slow)%0A while nums%5Bslow%5D * nums%5Bfast%5D %3E 0 and nums%5Bslow%5D * nums%5Bself.index(nums, fast)%5D %3E 0:%0A if slow == fast and fast != self.index(nums, fast):%0A return True%0A elif slow == fast and fast == self.index(nums, fast):%0A break%0A slow = self.index(nums, slow)%0A fast = self.index(nums, self.index(nums, fast))%0A %0A # set path to all 0s since it doesn't work%0A runner = i%0A value = nums%5Brunner%5D%0A while nums%5Brunner%5D * value %3E 0:%0A temp = self.index(nums, runner)%0A nums%5Brunner%5D = 0%0A runner = temp%0A return False%0A %0A def index(self, nums, index):%0A length = len(nums)%0A return (index + nums%5Bindex%5D + length) %25 length%0A
|
|
ae484c893c9cbef5a80b908ba254885e1db4d0b3
|
Create 0015_auto_20200128_1045.py
|
bluebottle/activities/migrations/0015_auto_20200128_1045.py
|
bluebottle/activities/migrations/0015_auto_20200128_1045.py
|
Python
| 0.000001 |
@@ -0,0 +1,1320 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.15 on 2020-01-28 09:45%0Afrom __future__ import unicode_literals%0A%0Afrom django.conf import settings%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0Aimport django.utils.timezone%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('activities', '0014_add_permissions'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='contribution',%0A name='contribution_date',%0A field=models.DateTimeField(default=django.utils.timezone.now),%0A preserve_default=False,%0A ),%0A migrations.AlterField(%0A model_name='activity',%0A name='transition_date',%0A field=models.DateTimeField(blank=True, help_text='Date the contribution took place.', null=True, verbose_name='contribution date'),%0A ),%0A migrations.AlterField(%0A model_name='contribution',%0A name='transition_date',%0A field=models.DateTimeField(blank=True, null=True),%0A ),%0A migrations.AlterField(%0A model_name='contribution',%0A name='user',%0A field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='user'),%0A ),%0A %5D%0A
|
|
a59aef7c780c4d940ff56fa34ddf38de46056a6f
|
add package py-flask-compress (#7713)
|
var/spack/repos/builtin/packages/py-flask-compress/package.py
|
var/spack/repos/builtin/packages/py-flask-compress/package.py
|
Python
| 0 |
@@ -0,0 +1,1717 @@
+##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass PyFlaskCompress(PythonPackage):%0A %22%22%22Flask-Compress allows you to easily compress your Flask application's%0A responses with gzip.%22%22%22%0A%0A homepage = %22https://github.com/libwilliam/flask-compress%22%0A url = %22https://pypi.io/packages/source/F/Flask-Compress/Flask-Compress-1.4.0.tar.gz%22%0A%0A version('1.4.0', 'd997f73e4ed5793ec526c135aa765e15')%0A%0A depends_on('py-setuptools', type='build')%0A depends_on('[email protected]:', type=('build', 'run'))%0A
|
|
d48b17dc82e359aca962449c6df51aaea88a11d3
|
add resource manager tests
|
pyvisa_py/testsuite/keysight_assisted_tests/test_resource_manager.py
|
pyvisa_py/testsuite/keysight_assisted_tests/test_resource_manager.py
|
Python
| 0.000008 |
@@ -0,0 +1,527 @@
+# -*- coding: utf-8 -*-%0A%22%22%22Test the Resource manager.%0A%0A%22%22%22%0Afrom pyvisa.testsuite.keysight_assisted_tests import require_virtual_instr%0Afrom pyvisa.testsuite.keysight_assisted_tests.test_resource_manager import (%0A TestResourceManager as BaseTestResourceManager,%0A TestResourceParsing as BaseTestResourceParsing,%0A)%0A%0A%0A@require_virtual_instr%0Aclass TestPyResourceManager(BaseTestResourceManager):%0A %22%22%22%0A %22%22%22%0A%0A pass%0A%0A%0A@require_virtual_instr%0Aclass TestPyResourceParsing(BaseTestResourceParsing):%0A %22%22%22%0A %22%22%22%0A%0A pass%0A
|
|
d623140b606d7ec9b874419b4414833d669f5677
|
add a way to set the last sync date
|
crate_project/apps/crate/management/commands/set_last_sync.py
|
crate_project/apps/crate/management/commands/set_last_sync.py
|
Python
| 0.000026 |
@@ -0,0 +1,314 @@
+import redis%0A%0Afrom django.conf import settings%0Afrom django.core.management.base import BaseCommand%0A%0A%0Aclass Command(BaseCommand):%0A%0A def handle(self, *args, **options):%0A r = redis.StrictRedis(**getattr(settings, %22PYPI_DATASTORE_CONFIG%22, %7B%7D))%0A if args:%0A r.set(%22crate:pypi:since%22, args%5B0%5D)%0A
|
|
4fd3c2fb2ac14fcfc7183108d2d3c79377d85790
|
read router for grouptagvalue instead of hardcoding default
|
src/sentry/models/grouptagvalue.py
|
src/sentry/models/grouptagvalue.py
|
"""
sentry.models.grouptagvalue
~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from datetime import timedelta
from django.db import connections, models
from django.db.models import Sum
from django.utils import timezone
from sentry.constants import MAX_TAG_KEY_LENGTH, MAX_TAG_VALUE_LENGTH
from sentry.db.models import (
Model, BoundedPositiveIntegerField, BaseManager, FlexibleForeignKey,
sane_repr
)
from sentry.utils import db
class GroupTagValue(Model):
"""
Stores the total number of messages seen by a group matching
the given filter.
"""
__core__ = False
project = FlexibleForeignKey('sentry.Project', null=True, related_name='grouptag')
group = FlexibleForeignKey('sentry.Group', related_name='grouptag')
times_seen = BoundedPositiveIntegerField(default=0)
key = models.CharField(max_length=MAX_TAG_KEY_LENGTH)
value = models.CharField(max_length=MAX_TAG_VALUE_LENGTH)
last_seen = models.DateTimeField(
default=timezone.now, db_index=True, null=True)
first_seen = models.DateTimeField(
default=timezone.now, db_index=True, null=True)
objects = BaseManager()
class Meta:
app_label = 'sentry'
db_table = 'sentry_messagefiltervalue'
unique_together = (
('group', 'key', 'value'),
)
index_together = (
('project', 'key', 'value', 'last_seen'),
)
__repr__ = sane_repr('project_id', 'group_id', 'key', 'value')
def save(self, *args, **kwargs):
if not self.first_seen:
self.first_seen = self.last_seen
super(GroupTag, self).save(*args, **kwargs)
@classmethod
def get_value_count(cls, group_id, key):
if db.is_postgres():
# This doesnt guarantee percentage is accurate, but it does ensure
# that the query has a maximum cost
cursor = connections['default'].cursor()
cursor.execute("""
SELECT SUM(t)
FROM (
SELECT times_seen as t
FROM sentry_messagefiltervalue
WHERE group_id = %s
AND key = %s
ORDER BY last_seen DESC
LIMIT 10000
) as a
""", [group_id, key])
return cursor.fetchone()[0] or 0
cutoff = timezone.now() - timedelta(days=7)
return cls.objects.filter(
group=group_id,
key=key,
last_seen__gte=cutoff,
).aggregate(t=Sum('times_seen'))['t']
@classmethod
def get_top_values(cls, group_id, key, limit=3):
if db.is_postgres():
# This doesnt guarantee percentage is accurate, but it does ensure
# that the query has a maximum cost
return list(cls.objects.raw("""
SELECT *
FROM (
SELECT *
FROM sentry_messagefiltervalue
WHERE group_id = %%s
AND key = %%s
ORDER BY last_seen DESC
LIMIT 10000
) as a
ORDER BY times_seen DESC
LIMIT %d
""" % limit, [group_id, key]))
cutoff = timezone.now() - timedelta(days=7)
return list(cls.objects.filter(
group=group_id,
key=key,
last_seen__gte=cutoff,
).order_by('-times_seen')[:limit])
GroupTag = GroupTagValue
|
Python
| 0 |
@@ -291,16 +291,24 @@
, models
+, router
%0Afrom dj
@@ -2001,32 +2001,76 @@
a maximum cost%0A
+ using = router.db_for_read(cls)%0A
curs
@@ -2090,17 +2090,13 @@
ons%5B
-'default'
+using
%5D.cu
|
ca96e71995c9daa17323a3285bee71c8d334c11e
|
Add abstract classifier
|
assisstant/keyboard/classification/abstract_classifier.py
|
assisstant/keyboard/classification/abstract_classifier.py
|
Python
| 0.999999 |
@@ -0,0 +1,371 @@
+from abc import ABC, abstractmethod, abstractproperty%0Aclass AbstractClassifier(ABC):%0A def __init__(self, freqs, duration, data=None):%0A self.freqs = freqs%0A self.duration = duration%0A if data:%0A self.data = data%0A self.train(data)%0A%0A @abstractmethod%0A def classify(self, sample):%0A pass%0A%0A @abstractmethod%0A def train(self, data):%0A pass%0A
|
|
e089107eb52c320309b3ddd2ea2b6e764f74ff09
|
Create LeverBox.py
|
_includes/LeverBox.py
|
_includes/LeverBox.py
|
Python
| 0 |
@@ -0,0 +1,1424 @@
+State_Probs = %7B(9,1,1,1,1,1,1,1,1,1) : 1%7D%0A%0Adef Modified_State(State,Indexes):%0A%09%0A%09New_State_List = list(State)%0A%09for i in Indexes:%0A%09%09New_State_List%5Bi%5D = 1%0A%09New_State = tuple(New_State_List)%0A%09return New_State%0A%0Adef Best_Case_Prob_For(State,Sum):%0A%09%09%0A%09Best_Case_Prob = 0%0A%0A%09if Sum %3C 10 and State%5BSum%5D == 0:%0A%09%09P = Prob_For_State(Modified_State(State,(Sum,)))%0A%09%09if P %3E Best_Case_Prob:%0A%09%09%09Best_Case_Prob = P%0A%0A%09for i in range(1, min(9,Sum)):%0A%09%09j = Sum - i%0A%09%09if j %3E 9 or i == j:%0A%09%09%09continue%0A%09%09try:%0A%09%09%09if State%5Bi%5D == 0 and State%5Bj%5D == 0:%0A%09%09%09%09P = Prob_For_State(Modified_State(State,(i,j)))%0A%09%09%09%09if P %3E Best_Case_Prob:%0A%09%09%09%09%09Best_Case_Prob = P%0A%09%09except:%0A%09%09%09print i,j,Sum,State%0A%09for i in range(1, min(7,Sum)):%0A%09%09for j in range(1, min(7,Sum)):%0A%09%09%09k = Sum - i - j%0A%09%09%09if k %3C 1 or k %3E 9 or i == j or j == k or i ==k:%0A%09%09%09%09continue%0A%09%09%09if State%5Bi%5D == 0 and State%5Bj%5D == 0 and State%5Bk%5D == 0:%0A%09%09%09%09P = Prob_For_State(Modified_State(State,(i,j,k)))%0A%09%09%09%09if P %3E Best_Case_Prob:%0A%09%09%09%09%09Best_Case_Prob = P%0A%09return Best_Case_Prob%0A%0A%0Adef Prob_For_State(State):%0A%09global State_Probs %0A%0A%09if State in State_Probs:%0A%09%09return State_Probs%5BState%5D%0A%0A%09# Throw one die%0A%09P1 = 0%0A%09for i in range(1,7):%0A%09%09P1 += 1.0/6 * Best_Case_Prob_For(State,i)%0A%0A%09# Throw two dice%0A%09P2 = 0%0A%09for i in range(1,7):%0A%09%09for j in range(1,7):%0A%09%09%09P2 += 1.0/36 * Best_Case_Prob_For(State,i+j)%0A%09P = max(P1,P2)%0A%09State_Probs%5BState%5D = P%0A%09return P%0A%0Aprint %22P(000000000) =%22, Prob_For_State((9,0,0,0,0,0,0,0,0,0))%0A
|
|
2eb090b406a341c3b225e59779d0046cf76efc6c
|
Add download_feeds.py script
|
download_feeds.py
|
download_feeds.py
|
Python
| 0 |
@@ -0,0 +1,551 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Aimport sys, os%0Aimport boto3%0A%0Abucketname = 'static.ucldc.cdlib.org'%0Aprefix = 'merritt/'%0A%0As3 = boto3.resource('s3')%0Abucket = s3.Bucket(bucketname)%0A%0Afor obj in bucket.objects.filter(Prefix=prefix):%0A if obj.key.endswith('.atom'):%0A print %22downloading %7B%7D%22.format(obj.key)%0A filename = obj.key.split('/')%5B1%5D %0A filepath = './feeds_current/%7B%7D'.format(filename)%0A print %22local filepath: %7B%7D%22.format(filepath)%0A s3.Bucket('static.ucldc.cdlib.org').download_file(obj.key, filepath)%0A
|
|
494c0603c4aedb83852a008fad2139c469b537fd
|
Rename histograms in memory_benchmark_unittest.
|
tools/perf/perf_tools/memory_benchmark_unittest.py
|
tools/perf/perf_tools/memory_benchmark_unittest.py
|
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from perf_tools import memory_benchmark
from telemetry.page import page_benchmark_unittest_base
class MemoryBenchmarkUnitTest(
page_benchmark_unittest_base.PageBenchmarkUnitTestBase):
def testMemoryBenchmark(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('page_with_link.html')
ps.pages[0].stress_memory = {'action': 'click_element', 'text': 'Click me'}
benchmark = memory_benchmark.MemoryBenchmark()
all_results = self.RunBenchmark(benchmark, ps)
self.assertEqual(0, len(all_results.page_failures))
self.assertEqual(1, len(all_results.page_results))
results0 = all_results.page_results[0]
expected_measurements = ['V8_MemoryExternalFragmentationTotal',
'V8_MemoryHeapSampleTotalCommitted',
'V8_MemoryHeapSampleTotalUsed']
self.assertTrue(all(
[m in results0.measurement_names for m in expected_measurements]))
|
Python
| 0.000056 |
@@ -827,17 +827,17 @@
s = %5B'V8
-_
+.
MemoryEx
@@ -887,33 +887,33 @@
'V8
-_
+.
MemoryHeapSample
@@ -961,17 +961,17 @@
'V8
-_
+.
MemoryHe
|
0ea6bab984abee943d93cdfa90273b7a7aabcf8f
|
add new package : brltty (#15161)
|
var/spack/repos/builtin/packages/brltty/package.py
|
var/spack/repos/builtin/packages/brltty/package.py
|
Python
| 0 |
@@ -0,0 +1,1206 @@
+# Copyright 2013-2020 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass Brltty(AutotoolsPackage):%0A %22%22%22BRLTTY is a background process (daemon) providing access to the%0A Linux/Unix console (when in text mode) for a blind person using%0A a refreshable braille display.%22%22%22%0A%0A homepage = %22http://brltty.app/%22%0A url = %22https://github.com/brltty/brltty/archive/BRLTTY-6.0.tar.gz%22%0A%0A version('6.0', sha256='acfea5274bdc9230b0ea1a87f8796e241615d4d2c1ba08d87601b9d116c7804c')%0A version('5.6', sha256='74f35043943525396b340b9f65f0d73c3cc4054a8f63d1c685f27ccf59f46c5d')%0A version('5.5', sha256='cd80a0d225f13779791dc3a72d7f137c06c48e5f2c9600e80a565d2378422207')%0A version('5.4', sha256='9ad5a540d29438a755f8b8f1f1534e0eba601c604f3d8223fa00b802959ec636')%0A%0A depends_on('autoconf', type='build')%0A depends_on('automake', type='build')%0A depends_on('libtool', type='build')%0A depends_on('m4', type='build')%0A depends_on('expat')%0A%0A def autoreconf(self, spec, prefix):%0A bash = which('bash')%0A bash('autogen')%0A
|
|
a8c7d9a2ed9462506130157ce5eccad9121013a3
|
add new package (#24004)
|
var/spack/repos/builtin/packages/r-afex/package.py
|
var/spack/repos/builtin/packages/r-afex/package.py
|
Python
| 0 |
@@ -0,0 +1,1690 @@
+# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other%0A# Spack Project Developers. See the top-level COPYRIGHT file for details.%0A#%0A# SPDX-License-Identifier: (Apache-2.0 OR MIT)%0A%0Afrom spack import *%0A%0A%0Aclass RAfex(RPackage):%0A %22%22%22Analysis of Factorial Experiments%0A%0A Convenience functions for analyzing factorial experiments using ANOVA or%0A mixed models. aov_ez(), aov_car(), and aov_4() allow specification of%0A between, within (i.e., repeated-measures), or mixed (i.e., split-plot)%0A ANOVAs for data in long format (i.e., one observation per row),%0A automatically aggregating multiple observations per individual and cell%0A of the design. mixed() fits mixed models using lme4::lmer() and computes%0A p-values for all fixed effects using either Kenward-Roger or Satterthwaite%0A approximation for degrees of freedom (LMM only), parametric bootstrap%0A (LMMs and GLMMs), or likelihood ratio tests (LMMs and GLMMs).%0A afex_plot() provides a high-level interface for interaction or one-way%0A plots using ggplot2, combining raw data and model estimates. afex uses%0A type 3 sums of squares as default (imitating commercial statistical%0A software).%0A %22%22%22%0A%0A homepage = %22https://github.com/singmann/afex%22%0A cran = %22afex%22%0A%0A version('0.28-1', sha256='cfb0b79bfa01b590afc3354a5b2ad3640d2f4974b036d6c256fa8e684bc69c2e')%0A%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('[email protected]:', type=('build', 'run'))%0A depends_on('r-car', type=('build', 'run'))%0A depends_on('r-reshape2', type=('build', 'run'))%0A
|
|
dee47d50ecf4f0e80a127d624027c1688d34b504
|
Use 0/1 for raspberry pi cover GPIO writes rather than true/false (#13610)
|
homeassistant/components/cover/rpi_gpio.py
|
homeassistant/components/cover/rpi_gpio.py
|
"""
Support for controlling a Raspberry Pi cover.
Instructions for building the controller can be found here
https://github.com/andrewshilliday/garage-door-controller
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover.rpi_gpio/
"""
import logging
from time import sleep
import voluptuous as vol
from homeassistant.components.cover import CoverDevice, PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.components.rpi_gpio as rpi_gpio
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_COVERS = 'covers'
CONF_RELAY_PIN = 'relay_pin'
CONF_RELAY_TIME = 'relay_time'
CONF_STATE_PIN = 'state_pin'
CONF_STATE_PULL_MODE = 'state_pull_mode'
CONF_INVERT_STATE = 'invert_state'
CONF_INVERT_RELAY = 'invert_relay'
DEFAULT_RELAY_TIME = .2
DEFAULT_STATE_PULL_MODE = 'UP'
DEFAULT_INVERT_STATE = False
DEFAULT_INVERT_RELAY = False
DEPENDENCIES = ['rpi_gpio']
_COVERS_SCHEMA = vol.All(
cv.ensure_list,
[
vol.Schema({
CONF_NAME: cv.string,
CONF_RELAY_PIN: cv.positive_int,
CONF_STATE_PIN: cv.positive_int,
})
]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_COVERS): _COVERS_SCHEMA,
vol.Optional(CONF_STATE_PULL_MODE, default=DEFAULT_STATE_PULL_MODE):
cv.string,
vol.Optional(CONF_RELAY_TIME, default=DEFAULT_RELAY_TIME): cv.positive_int,
vol.Optional(CONF_INVERT_STATE, default=DEFAULT_INVERT_STATE): cv.boolean,
vol.Optional(CONF_INVERT_RELAY, default=DEFAULT_INVERT_RELAY): cv.boolean,
})
# pylint: disable=unused-argument
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the RPi cover platform."""
relay_time = config.get(CONF_RELAY_TIME)
state_pull_mode = config.get(CONF_STATE_PULL_MODE)
invert_state = config.get(CONF_INVERT_STATE)
invert_relay = config.get(CONF_INVERT_RELAY)
covers = []
covers_conf = config.get(CONF_COVERS)
for cover in covers_conf:
covers.append(RPiGPIOCover(
cover[CONF_NAME], cover[CONF_RELAY_PIN], cover[CONF_STATE_PIN],
state_pull_mode, relay_time, invert_state, invert_relay))
add_devices(covers)
class RPiGPIOCover(CoverDevice):
"""Representation of a Raspberry GPIO cover."""
def __init__(self, name, relay_pin, state_pin, state_pull_mode,
relay_time, invert_state, invert_relay):
"""Initialize the cover."""
self._name = name
self._state = False
self._relay_pin = relay_pin
self._state_pin = state_pin
self._state_pull_mode = state_pull_mode
self._relay_time = relay_time
self._invert_state = invert_state
self._invert_relay = invert_relay
rpi_gpio.setup_output(self._relay_pin)
rpi_gpio.setup_input(self._state_pin, self._state_pull_mode)
rpi_gpio.write_output(self._relay_pin, not self._invert_relay)
@property
def name(self):
"""Return the name of the cover if any."""
return self._name
def update(self):
"""Update the state of the cover."""
self._state = rpi_gpio.read_input(self._state_pin)
@property
def is_closed(self):
"""Return true if cover is closed."""
return self._state != self._invert_state
def _trigger(self):
"""Trigger the cover."""
rpi_gpio.write_output(self._relay_pin, self._invert_relay)
sleep(self._relay_time)
rpi_gpio.write_output(self._relay_pin, not self._invert_relay)
def close_cover(self, **kwargs):
"""Close the cover."""
if not self.is_closed:
self._trigger()
def open_cover(self, **kwargs):
"""Open the cover."""
if self.is_closed:
self._trigger()
|
Python
| 0.001824 |
@@ -2964,35 +2964,36 @@
elf._relay_pin,
-not
+0 if
self._invert_re
@@ -2991,24 +2991,31 @@
invert_relay
+ else 1
)%0A%0A @prop
@@ -3478,24 +3478,29 @@
._relay_pin,
+ 1 if
self._inver
@@ -3506,16 +3506,23 @@
rt_relay
+ else 0
)%0A
@@ -3594,19 +3594,20 @@
ay_pin,
-not
+0 if
self._i
@@ -3617,16 +3617,23 @@
rt_relay
+ else 1
)%0A%0A d
|
c8cc7f6fe7c0f59697972602773d67b3fde40360
|
Add basic filter classes.
|
penchy/filters.py
|
penchy/filters.py
|
Python
| 0 |
@@ -0,0 +1,639 @@
+class Filter(object):%0A %22%22%22%0A Base class for filters.%0A%0A Inheriting classes must implement:%0A - %60%60run(*inputs)%60%60 to run the filter on inputs which can be Producer or%0A Filter instances, after executing self.out has to be set to the%0A path of the produced output file%0A %22%22%22%0A%0A def run(self, inputs):%0A %22%22%22%0A Run the filter on the inputs.%0A%0A :param inputs: Producer or Filter classes which output will be processed.%0A %22%22%22%0A raise NotImplementedError(%22run must be implemented by filters%22)%0A%0A%0Aclass WallclockDacapo(Filter):%0A #TODO%0A pass%0A%0Aclass HProf(Filter):%0A #TODO%0A pass%0A
|
|
d46607c0d06e66346e2fddc76a28c9e8773c6d71
|
Add option to specify the location of the API (fixes #8115) (#8118)
|
homeassistant/components/sensor/pi_hole.py
|
homeassistant/components/sensor/pi_hole.py
|
"""
Support for getting statistical data from a Pi-Hole system.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.pi_hole/
"""
import logging
import json
from datetime import timedelta
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME, CONF_HOST, CONF_SSL, CONF_VERIFY_SSL, CONF_MONITORED_CONDITIONS)
_LOGGER = logging.getLogger(__name__)
_ENDPOINT = '/admin/api.php'
ATTR_BLOCKED_DOMAINS = 'domains_blocked'
ATTR_PERCENTAGE_TODAY = 'percentage_today'
ATTR_QUERIES_TODAY = 'queries_today'
DEFAULT_HOST = 'localhost'
DEFAULT_METHOD = 'GET'
DEFAULT_NAME = 'Pi-Hole'
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
SCAN_INTERVAL = timedelta(minutes=5)
MONITORED_CONDITIONS = {
'dns_queries_today': ['DNS Queries Today',
None, 'mdi:network-question'],
'ads_blocked_today': ['Ads Blocked Today',
None, 'mdi:close-octagon-outline'],
'ads_percentage_today': ['Ads Percentage Blocked Today',
'%', 'mdi:close-octagon-outline'],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_MONITORED_CONDITIONS, default=MONITORED_CONDITIONS):
vol.All(cv.ensure_list, [vol.In(MONITORED_CONDITIONS)]),
})
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Pi-Hole sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
use_ssl = config.get(CONF_SSL)
verify_ssl = config.get(CONF_VERIFY_SSL)
api = PiHoleAPI(host, use_ssl, verify_ssl)
if api.data is None:
_LOGGER.error("Unable to fetch data from Pi-Hole")
return False
sensors = [PiHoleSensor(hass, api, name, condition)
for condition in config[CONF_MONITORED_CONDITIONS]]
add_devices(sensors, True)
class PiHoleSensor(Entity):
"""Representation of a Pi-Hole sensor."""
def __init__(self, hass, api, name, variable):
"""Initialize a Pi-Hole sensor."""
self._hass = hass
self._api = api
self._name = name
self._var_id = variable
variable_info = MONITORED_CONDITIONS[variable]
self._var_name = variable_info[0]
self._var_units = variable_info[1]
self._var_icon = variable_info[2]
@property
def name(self):
"""Return the name of the sensor."""
return "{} {}".format(self._name, self._var_name)
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._var_units
# pylint: disable=no-member
@property
def state(self):
"""Return the state of the device."""
return self._api.data[self._var_id]
# pylint: disable=no-member
@property
def device_state_attributes(self):
"""Return the state attributes of the Pi-Hole."""
return {
ATTR_BLOCKED_DOMAINS: self._api.data['domains_being_blocked'],
}
def update(self):
"""Get the latest data from the Pi-Hole API."""
self._api.update()
class PiHoleAPI(object):
"""Get the latest data and update the states."""
def __init__(self, host, use_ssl, verify_ssl):
"""Initialize the data object."""
from homeassistant.components.sensor.rest import RestData
uri_scheme = 'https://' if use_ssl else 'http://'
resource = "{}{}{}".format(uri_scheme, host, _ENDPOINT)
self._rest = RestData('GET', resource, None, None, None, verify_ssl)
self.data = None
self.update()
def update(self):
"""Get the latest data from the Pi-Hole."""
try:
self._rest.update()
self.data = json.loads(self._rest.data)
except TypeError:
_LOGGER.error("Unable to fetch data from Pi-Hole")
|
Python
| 0 |
@@ -607,14 +607,8 @@
= '/
-admin/
api.
@@ -739,33 +739,88 @@
y'%0A%0A
-DEFAULT_HOST = 'localhost
+CONF_LOCATION = 'location'%0ADEFAULT_HOST = 'localhost'%0A%0ADEFAULT_LOCATION = 'admin
'%0ADE
@@ -1536,32 +1536,102 @@
L): cv.boolean,%0A
+ vol.Optional(CONF_LOCATION, default=DEFAULT_LOCATION): cv.string,%0A
vol.Optional
@@ -2040,16 +2040,57 @@
NF_SSL)%0A
+ location = config.get(CONF_LOCATION)%0A
veri
@@ -2151,140 +2151,60 @@
API(
-host, use_ssl, verify_ssl)%0A%0A if api.data is None:%0A _LOGGER.error(%22Unable to fetch data from Pi-Hole%22)%0A return False
+'%7B%7D/%7B%7D'.format(host, location), use_ssl, verify_ssl)
%0A%0A
@@ -3615,16 +3615,163 @@
%7D%0A%0A
+ @property%0A def available(self):%0A %22%22%22Could the device be accessed during the last update call.%22%22%22%0A return self._api.available%0A%0A
def
@@ -4333,16 +4333,45 @@
= None%0A
+ self.available = True
%0A
@@ -4557,16 +4557,50 @@
t.data)%0A
+ self.available = True%0A
@@ -4680,8 +4680,43 @@
-Hole%22)%0A
+ self.available = False%0A
|
8b95f442f3e78a5f3de539075379b88fc940e818
|
add custom settings for momza
|
casepro/settings_production_momza.py
|
casepro/settings_production_momza.py
|
Python
| 0 |
@@ -0,0 +1,1471 @@
+from __future__ import unicode_literals%0Aimport os%0A%0A# import our default settings%0Afrom settings_production import * # noqa%0A%0A# Pods%0APODS = %5B%7B%0A 'label': %22family_connect_registration_pod%22,%0A 'title': %22Registration Information%22,%0A 'url': os.environ.get('REGISTRATION_URL', ''),%0A 'token': os.environ.get('REGISTRATION_AUTH_TOKEN',%0A 'replace-with-auth-token'),%0A 'contact_id_fieldname': os.environ.get('REGISTRATION_CONTACT_ID_FIELDNAME',%0A 'registrant_id'),%0A 'field_mapping': %5B%0A %7B%22field%22: %22reg_type%22, %22field_name%22: %22Registration Type%22%7D,%0A %7B%22field%22: %22language%22, %22field_name%22: %22Language Preference%22%7D,%0A %7B%22field%22: %22id_type%22, %22field_name%22: %22ID Type%22%7D,%0A %7B%22field%22: %22sa_id_no%22, %22field_name%22: %22ID Number%22%7D,%0A %7B%22field%22: %22mom_dob%22, %22field_name%22: %22Mother's Date of Birth%22%7D,%0A %7B%22field%22: %22consent%22, %22field_name%22: %22Consent%22%7D,%0A %7B%22field%22: %22operator_id%22, %22field_name%22: %22Operator ID%22%7D,%0A %7B%22field%22: %22registrant_id%22, %22field_name%22: %22Registrant ID%22%7D,%0A %7B%22field%22: %22msisdn_registrant%22, %22field_name%22: %22MSISDN of Registrant%22%7D,%0A %7B%22field%22: %22msisdn_device%22, %22field_name%22: %22MSISDN of Device%22%7D,%0A %5D%0A%7D, %7B%0A 'label': %22family_connect_subscription_pod%22,%0A 'title': %22Subscription Information%22,%0A 'url': os.environ.get('SUBSCRIPTION_URL', ''),%0A 'token': os.environ.get('SUBSCRIPTION_AUTH_TOKEN',%0A 'replace-with-auth-token'),%0A%7D%5D%0A
|
|
797249c42c8c1c0d6eda05dbf9e9d16d2706b373
|
Add LeNet example with custom scoring and train_samples_per_iteration.
|
h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater.py
|
h2o-py/tests/testdir_algos/deepwater/pyunit_lenet_deepwater.py
|
Python
| 0 |
@@ -0,0 +1,868 @@
+from __future__ import print_function%0Aimport sys, os%0Asys.path.insert(1, os.path.join(%22..%22,%22..%22,%22..%22))%0Aimport h2o%0Afrom tests import pyunit_utils%0Afrom h2o.estimators.deepwater import H2ODeepWaterEstimator%0A%0Adef deepwater_lenet():%0A print(%22Test checks if Deep Water works fine with a multiclass image dataset%22)%0A%0A frame = h2o.import_file(pyunit_utils.locate(%22bigdata/laptop/deepwater/imagenet/cat_dog_mouse.csv%22))%0A print(frame.head(5))%0A model = H2ODeepWaterEstimator(epochs=100, rate=1e-3, network='lenet', score_interval=0, train_samples_per_iteration=1000)%0A model.train(x=%5B0%5D,y=1, training_frame=frame)%0A model.show()%0A error = model.model_performance(train=True).mean_per_class_error()%0A assert error %3C 0.1, %22mean classification error is too high : %22 + str(error)%0A%0Aif __name__ == %22__main__%22:%0A pyunit_utils.standalone_test(deepwater_lenet)%0Aelse:%0A deepwater_lenet()%0A
|
|
26d095d44a02862a4d567537e824170e75930a9a
|
add email people script
|
fantasydota/scripts/email_users.py
|
fantasydota/scripts/email_users.py
|
Python
| 0.000003 |
@@ -0,0 +1,1151 @@
+from fantasydota import DBSession%0Afrom fantasydota.models import User%0Afrom pyramid_mailer import Mailer%0Afrom pyramid_mailer.message import Message%0A%0A%0Adef email_users():%0A session = DBSession()%0A for user in session.query(User).filter(User.email.isnot(%22%22)).all():%0A if user.email:%0A email = %22testemail%22#user.email%0A mailer = Mailer()%0A message = Message(subject=%22Fantasy Hero Dota New System%22,%0A sender=%22Fantasy Dota EU%22,%0A recipients=%5Bemail%5D,%0A body=%22Hi %25s.%5Cn%5CnJust letting you know fantasy leagues now run every week, on all pro circuit matches%5Cn%5Cn%22%0A %22You can pick your team for first week starting 1st January now https://www.fantasyesport.eu/dota/team%5Cn%5Cn%22%0A %22This is the 'finalised' state of the site for DotA. Therefore I will not email anyone again. Apologies for the spam/promotion. Have a nice Christmas :D%22 %25 (%0A user.username))%0A mailer.send(message)%0A return%0A%0Aif __name__ == '__main__':%0A email_users()%0A
|
|
8cdbbbaf33cd09bc742761ce8cd5b79b185710cd
|
Introduce a timer based update of activities
|
webtool/server/management/commands/timer_update.py
|
webtool/server/management/commands/timer_update.py
|
Python
| 0 |
@@ -0,0 +1,2822 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import absolute_import%0Afrom __future__ import unicode_literals%0Afrom __future__ import print_function%0A%0Aimport io%0A%0Aimport datetime%0Afrom django.core.management.base import BaseCommand%0A%0Afrom server.models import Instruction, Tour, Talk, Session, Season%0Afrom server.views.bulletin import Activities%0Afrom server.views.bulletin.translator import Translator%0A%0A%0Aclass Command(BaseCommand):%0A leave_locale_alone = True%0A help = 'Update activities regarding the current date'%0A%0A def __init__(self, *args, **kwargs):%0A super(Command, self).__init__(*args, **kwargs)%0A%0A def handle(self, *args, **options):%0A%0A season = Season.objects.get(current=True)%0A%0A canceled = season.state_list.get(name='Ausgefallen')%0A completed = season.state_list.get(name='Durchgef%C3%BChrt')%0A not_touch = (canceled.id, completed.id)%0A%0A today = datetime.date.today()%0A%0A for instruction in Instruction.objects.filter(topic__seasons=season).exclude(state_id__in=not_touch):%0A event = instruction.instruction%0A event_done = ((event.end_date is None and event.start_date %3C today) or%0A (event.end_date and event.end_date %3C today))%0A if event_done:%0A instruction.state = completed%0A instruction.save()%0A instruction.instruction.save()%0A for event in instruction.meeting_list.all():%0A event.save()%0A%0A for tour in Tour.objects.filter(season=season).exclude(state_id__in=not_touch):%0A event = tour.tour%0A event_done = ((event.end_date is None and event.start_date %3C today) or%0A (event.end_date and event.end_date %3C today))%0A if event_done:%0A tour.state = completed%0A tour.save()%0A tour.deadline.save()%0A if tour.preliminary:%0A tour.preliminary.save()%0A tour.tour.save()%0A%0A for talk in Talk.objects.filter(season=season).exclude(state_id__in=not_touch):%0A event = talk.talk%0A event_done = ((event.end_date is None and event.start_date %3C today) or%0A (event.end_date and event.end_date %3C today))%0A if 
event_done:%0A talk.state = completed%0A talk.save()%0A talk.talk.save()%0A%0A for session in Session.objects.filter(collective__seasons=season).exclude(state_id__in=not_touch):%0A event = session.session%0A event_done = ((event.end_date is None and event.start_date %3C today) or%0A (event.end_date and event.end_date %3C today))%0A if event_done:%0A session.state = completed%0A session.save()%0A session.session.save()%0A
|
|
eceee762dd3773aacceb52119014dad88e363c8d
|
Create find_subnets_with_broker.py
|
python/NetMRI_GUI_Python/find_subnets_with_broker.py
|
python/NetMRI_GUI_Python/find_subnets_with_broker.py
|
Python
| 0.000001 |
@@ -0,0 +1,771 @@
+# BEGIN-SCRIPT-BLOCK%0A#%0A# Script-Filter:%0A# true%0A#%0A# END-SCRIPT-BLOCK%0A%0Afrom infoblox_netmri.easy import NetMRIEasy%0Aimport re%0A%0A# This values will be provided by NetMRI before execution%0Adefaults = %7B%0A %22api_url%22: api_url,%0A %22http_username%22: http_username,%0A %22http_password%22: http_password,%0A %22job_id%22: job_id,%0A %22device_id%22: device_id,%0A %22batch_id%22: batch_id,%0A %22script_login%22 : %22false%22%0A%7D%0A%0A# Create NetMRI context manager. It will close session after execution%0Awith NetMRIEasy(**defaults) as easy:%0A subnet_broker = easy.client.get_broker('Subnet')%0A all_subnets = subnet_broker.index%0A print(all_subnets)%0A params = %7B%0A 'select': 'SubnetCIDR'%0A %7D%0A results = all_subnets(**params)%0A for entry in results:%0A print(entry.SubnetCIDR)%0A
|
|
c96b885d4446db96402d9770d71012dbcafcb8cf
|
install go-vcf-tools by manage.py command
|
pergenie/apps/genome/management/commands/setup_go_vcf_tools.py
|
pergenie/apps/genome/management/commands/setup_go_vcf_tools.py
|
Python
| 0 |
@@ -0,0 +1,1410 @@
+import os%0Aimport glob%0Aimport shutil%0Aimport tarfile%0Aimport platform%0A%0Afrom django.core.management.base import BaseCommand, CommandError%0Afrom django.conf import settings%0A%0Afrom lib.utils.io import get_url_content%0Afrom lib.utils import clogging%0Alog = clogging.getColorLogger(__name__)%0A%0A%0Aclass Command(BaseCommand):%0A help = %22Setup go-vcf-tools%22%0A%0A def handle(self, *args, **options):%0A tmp_dir = os.path.join(settings.BASE_DIR, 'tmp')%0A bin_dir = os.path.join(settings.BASE_DIR, 'bin')%0A%0A # TODO: check already exists?%0A%0A log.info('Fetching go-vcf-tools ...')%0A url = '%7Brepo%7D/releases/download/%7Btag%7D/%7Bos_platform%7D.amd64.tar.gz'.format(repo='https://github.com/knmkr/go-vcf-tools',%0A tag='0.0.1', # TODO: get tag from command argument%0A os_platform=platform.system().lower())%0A tar_gz = os.path.join(tmp_dir, 'go-vcf-tools.tar.gz')%0A get_url_content(url, tar_gz)%0A%0A with tarfile.open(tar_gz, 'r') as tar:%0A dst = os.path.join(tmp_dir, 'go-vcf-tools')%0A tar.extractall(dst)%0A for tool in glob.glob(os.path.join(tmp_dir, 'go-vcf-tools', '*', 'vcf-*')):%0A shutil.copy(tool, bin_dir)%0A%0A os.remove(tar_gz)%0A shutil.rmtree(dst)%0A%0A log.info('Done.')%0A
|
|
76fbec63667f3844f2763d72e57e61c07209cdad
|
Create Meh.py
|
Meh/Meh.py
|
Meh/Meh.py
|
Python
| 0.000001 |
@@ -0,0 +1,363 @@
+import discord%0Afrom discord.ext import commands%0A%0Aclass Mycog:%0A %22%22%22My custom cog that does stuff!%22%22%22%0A%0A def __init__(self, bot):%0A self.bot = bot%0A%0A @commands.command()%0A async def mycom(self):%0A %22%22%22This does stuff!%22%22%22%0A%0A #Your code will go here%0A await self.bot.say(%22I can do stuff!%22)%0A%0Adef setup(bot):%0A bot.add_cog(Mycog(bot))%0A
|
|
6bbac26ffb6f2131cad62df48b5afa3849ee8276
|
add parameter 'force' in method 'init' to reinitialize existing parameters This is useful when creating a new database from a dump (reinitialize database.uuid, etc.)
|
openerp/addons/base/ir/ir_config_parameter.py
|
openerp/addons/base/ir/ir_config_parameter.py
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Store database-specific configuration parameters
"""
from osv import osv,fields
import uuid
import datetime
from tools import misc, config
from openerp import SUPERUSER_ID
"""
A dictionary holding some configuration parameters to be initialized when the database is created.
"""
_default_parameters = {
"database.uuid": lambda: str(uuid.uuid1()),
"database.create_date": lambda: datetime.datetime.now().strftime(misc.DEFAULT_SERVER_DATETIME_FORMAT),
"web.base.url": lambda: "http://localhost:%s" % config.get('xmlrpc_port'),
}
class ir_config_parameter(osv.osv):
"""Per-database storage of configuration key-value pairs."""
_name = 'ir.config_parameter'
_columns = {
'key': fields.char('Key', size=256, required=True, select=1),
'value': fields.text('Value', required=True),
}
_sql_constraints = [
('key_uniq', 'unique (key)', 'Key must be unique.')
]
def init(self, cr):
"""
Initializes the parameters listed in _default_parameters.
"""
for key, func in _default_parameters.iteritems():
ids = self.search(cr, SUPERUSER_ID, [('key','=',key)])
if not ids:
self.set_param(cr, SUPERUSER_ID, key, func())
def get_param(self, cr, uid, key, default=False, context=None):
"""Retrieve the value for a given key.
:param string key: The key of the parameter value to retrieve.
:param string default: default value if parameter is missing.
:return: The value of the parameter, or ``default`` if it does not exist.
:rtype: string
"""
ids = self.search(cr, uid, [('key','=',key)], context=context)
if not ids:
return default
param = self.browse(cr, uid, ids[0], context=context)
value = param.value
return value
def set_param(self, cr, uid, key, value, context=None):
"""Sets the value of a parameter.
:param string key: The key of the parameter value to set.
:param string value: The value to set.
:return: the previous value of the parameter or False if it did
not exist.
:rtype: string
"""
ids = self.search(cr, uid, [('key','=',key)], context=context)
if ids:
param = self.browse(cr, uid, ids[0], context=context)
old = param.value
self.write(cr, uid, ids, {'value': value}, context=context)
return old
else:
self.create(cr, uid, {'key': key, 'value': value}, context=context)
return False
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
Python
| 0.000188 |
@@ -1919,19 +1919,32 @@
self, cr
+, force=False
):%0A
-
@@ -2013,16 +2013,79 @@
meters.%0A
+ It overrides existing parameters if force is %60%60True%60%60.%0A
@@ -2146,16 +2146,108 @@
tems():%0A
+ # force=True skips search and always performs the 'if' body (because ids=False)%0A
@@ -2247,32 +2247,46 @@
ids =
+ not force and
self.search(cr,
|
6693172856655329d99f038d54b1d8819fc1a9b6
|
Add native code emitters example.
|
scripts/examples/02-Board-Control/native_emitters.py
|
scripts/examples/02-Board-Control/native_emitters.py
|
Python
| 0 |
@@ -0,0 +1,229 @@
+import time%0A%[email protected]_thumb%0Adef asm():%0A movw(r0, 42)%0A%[email protected]%0Adef viper(a, b):%0A return a + b%0A%[email protected]%0Adef native(a, b):%0A return a + b%0A%0A%0Aprint(asm())%0Aprint(viper(1, 2))%0Aprint(native(1, 2))%0A%0A
|
|
5cb43fbf0efadff7af68836243eb7e1711e7df1c
|
Add test_object script
|
jsk_2015_05_baxter_apc/euslisp/test_object_recognition.py
|
jsk_2015_05_baxter_apc/euslisp/test_object_recognition.py
|
Python
| 0.000002 |
@@ -0,0 +1,601 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A#%0Aimport rospy%0Afrom jsk_2014_picking_challenge.msg import ObjectRecognition%0A%0Aleft_result = None%0Aright_result = None%0A%0Adef _cb_left(msg):%0A global left_result%0A left_result = dict(zip(msg.candidates, msg.probabilities))%0A%0Adef _cb_right(msg):%0A global right_result%0A right_result = dict(zip(msg.candidates, msg.probabilities))%0A%0Arospy.init_node('test_object_recognition')%0Arospy.Subscriber('/left_process/bof_object_matcher/output', ObjectRecognition, _cb_left)%0Arospy.Subscriber('/right_process/bof_object_matcher/output', ObjectRecognition, _cb_right)%0A
|
|
f56e86ff774a55e7882957a8928bdca98ce4c3e8
|
Add missing migration
|
features/groups/migrations/0017_auto_20171127_1447.py
|
features/groups/migrations/0017_auto_20171127_1447.py
|
Python
| 0.0002 |
@@ -0,0 +1,639 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.11.7 on 2017-11-27 13:47%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport features.stadt.forms%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('groups', '0016_auto_20171120_1311'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='group',%0A name='slug',%0A field=models.SlugField(blank=True, help_text='Wird auch als Kurzname verwendet', null=True, unique=True, validators=%5Bfeatures.stadt.forms.validate_entity_slug%5D, verbose_name='Adresse der Gruppenseite'),%0A ),%0A %5D%0A
|
|
afedd723ad85b99c2ebd08246b5ed13b37cd62e9
|
make psnr.py
|
src/psnr.py
|
src/psnr.py
|
Python
| 0.001547 |
@@ -0,0 +1,107 @@
+#%0A# psnr.py%0A# Created by pira on 2017/08/05.%0A#%0A%0A#coding: utf-8%0Au%22%22%22For PSNR(Peak Signal to Noise Ratio).%22%22%22
|
|
5bb154f41f25d8c9bbd9067b29a03a5fc2dff371
|
Add functional tests for floating IP.
|
openstack/tests/functional/network/v2/test_floating_ip.py
|
openstack/tests/functional/network/v2/test_floating_ip.py
|
Python
| 0.000116 |
@@ -0,0 +1,2796 @@
+# Licensed under the Apache License, Version 2.0 (the %22License%22); you may%0A# not use this file except in compliance with the License. You may obtain%0A# a copy of the License at%0A#%0A# http://www.apache.org/licenses/LICENSE-2.0%0A#%0A# Unless required by applicable law or agreed to in writing, software%0A# distributed under the License is distributed on an %22AS IS%22 BASIS, WITHOUT%0A# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the%0A# License for the specific language governing permissions and limitations%0A# under the License.%0A%0Aimport uuid%0A%0Afrom openstack.network.v2 import floating_ip%0Afrom openstack.network.v2 import network%0Afrom openstack.network.v2 import subnet%0Afrom openstack.tests.functional import base%0A%0A%0Aclass TestFloatingIP(base.BaseFunctionalTest):%0A%0A NET_NAME = uuid.uuid4().hex%0A SUB_NAME = uuid.uuid4().hex%0A IPV4 = 4%0A CIDR = %2210.100.0.0/24%22%0A NET_ID = None%0A SUB_ID = None%0A FIP_ID = None%0A%0A @classmethod%0A def setUpClass(cls):%0A super(TestFloatingIP, cls).setUpClass()%0A args = %7B'router:external': True%7D%0A net = cls.conn.network.create_network(name=cls.NET_NAME, **args)%0A assert isinstance(net, network.Network)%0A cls.assertIs(cls.NET_NAME, net.name)%0A cls.NET_ID = net.id%0A sub = cls.conn.network.create_subnet(name=cls.SUB_NAME,%0A ip_version=cls.IPV4,%0A network_id=cls.NET_ID,%0A cidr=cls.CIDR)%0A assert isinstance(sub, subnet.Subnet)%0A cls.assertIs(cls.SUB_NAME, sub.name)%0A cls.SUB_ID = sub.id%0A fip = cls.conn.network.create_ip(floating_network_id=cls.NET_ID)%0A assert isinstance(fip, floating_ip.FloatingIP)%0A cls.FIP_ID = fip.id%0A%0A @classmethod%0A def tearDownClass(cls):%0A sot = cls.conn.network.delete_ip(cls.FIP_ID, ignore_missing=False)%0A cls.assertIs(None, sot)%0A sot = cls.conn.network.delete_subnet(cls.SUB_ID, ignore_missing=False)%0A cls.assertIs(None, sot)%0A sot = cls.conn.network.delete_network(cls.NET_ID, ignore_missing=False)%0A cls.assertIs(None, 
sot)%0A%0A def test_find(self):%0A sot = self.conn.network.find_ip(self.FIP_ID)%0A self.assertEqual(self.FIP_ID, sot.id)%0A%0A def test_get(self):%0A sot = self.conn.network.get_ip(self.FIP_ID)%0A self.assertEqual(self.NET_ID, sot.floating_network_id)%0A self.assertEqual('10.100.0.2', sot.floating_ip_address)%0A self.assertIn('floating_ip_address', sot)%0A self.assertIn('fixed_ip_address', sot)%0A self.assertIn('port_id', sot)%0A self.assertIn('router_id', sot)%0A%0A def test_list(self):%0A ids = %5Bo.id for o in self.conn.network.ips()%5D%0A self.assertIn(self.FIP_ID, ids)%0A
|
|
a5720071a950185e5afb1992dd4b66b47aefc242
|
Bump version 0.2.5
|
accounting/__init__.py
|
accounting/__init__.py
|
import os
# Use 'final' as the 4th element to indicate
# a full release
VERSION = (0, 2, 4)
def get_short_version():
return '%s.%s' % (VERSION[0], VERSION[1])
def get_version():
version = '%s.%s' % (VERSION[0], VERSION[1])
# Append 3rd digit if > 0
if VERSION[2]:
version = '%s.%s' % (version, VERSION[2])
return version
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'accounting/templates/accounting/base.html' can be accessed
# via both 'base.html' and 'accounting/base.html'. This allows Accounting's
# templates to be extended by templates with the same filename
ACCOUNTING_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/accounting')
ACCOUNTING_APPS = (
'accounting',
'accounting.libs',
'accounting.apps.connect',
'accounting.apps.people',
'accounting.apps.books',
'accounting.apps.reports',
# Third party apps that accounting depends on
'bootstrap3',
'django_select2',
'datetimewidget',
)
ACCOUNTING_TEMPLATE_CONTEXT_PROCESSORS = (
'accounting.apps.context_processors.metadata',
'accounting.apps.books.context_processors.organizations',
)
ACCOUNTING_MIDDLEWARE_CLASSES = (
'accounting.apps.books.middlewares.AutoSelectOrganizationMiddleware',
)
def get_apps():
return ACCOUNTING_APPS
|
Python
| 0.000001 |
@@ -84,17 +84,17 @@
(0, 2,
-4
+5
)%0A%0A%0Adef
|
fa6060a21767a0b5b2b3a10e4301e0c1a30134cb
|
Test the lit0,cmp before bra eliminator
|
i8c/tests/test_opt_lit0_cmp_before_bra.py
|
i8c/tests/test_opt_lit0_cmp_before_bra.py
|
Python
| 0 |
@@ -0,0 +1,709 @@
+from i8c.tests import TestCase%0A%0ASOURCE1 = %22%22%22%5C%0Adefine test::optimize_cmp_bra_const_const returns ptr%0A argument ptr x%0A%0A dup%0A load NULL%0A beq return_the_null%0A deref ptr%0A return%0Areturn_the_null:%0A%22%22%22%0A%0ASOURCE2 = %22%22%22%5C%0Adefine test::optimize_cmp_bra_const_const returns ptr%0A argument ptr x%0A%0A dup%0A load NULL%0A bne dereference%0A return%0Adereference:%0A deref ptr%0A%22%22%22%0A%0Aclass TestOptimizeLit0CmpBeforeBra(TestCase):%0A def test_optimize_lit0_cmp_before_bra(self):%0A %22%22%22Check that lit0,cmp before bra is eliminated.%22%22%22%0A for source in SOURCE1, SOURCE2:%0A tree, output = self.compile(source)%0A self.assertEqual(%5B%22dup%22, %22bra%22, %22skip%22, %22deref%22%5D, output.opnames)%0A
|
|
7be2721bfcbf3376ddce4d58f2cfe9680803f9bb
|
Create center_dmenu script.
|
center_dmenu.py
|
center_dmenu.py
|
Python
| 0 |
@@ -0,0 +1,1264 @@
+#!/usr/bin/env python2%0Afrom Xlib import display%0Aimport sys%0Afrom os import system%0A%0A%0Adef get_dimensions():%0A current_display = display.Display()%0A current_screen = current_display.screen()%0A return (current_screen%5B'width_in_pixels'%5D,%0A current_screen%5B'height_in_pixels'%5D)%0A%0A%0Adef parse_dmenu_args(args):%0A x_width, x_height = get_dimensions()%0A num_args = len(args)%0A%0A # Set some default values for dmenu args%0A dmenu_run_args = %7B%0A 'x': 200,%0A 'height': 50,%0A 'extra_args': %22-fn 'Inconsolata:size=10'%22%0A %7D%0A%0A # Get arguments from the command line.%0A if num_args %3E 1:%0A dmenu_run_args%5B'x'%5D = int(args%5B1%5D)%0A if num_args %3E 2:%0A dmenu_run_args%5B'height'%5D = int(args%5B2%5D)%0A if num_args %3E 3:%0A dmenu_run_args%5B'extra_args'%5D = args%5B3%5D%0A%0A # Determine propper height and width for input into dmenu%0A dmenu_run_args%5B'width'%5D = x_width - (2 * dmenu_run_args%5B'x'%5D)%0A dmenu_run_args%5B'y'%5D = (x_height - dmenu_run_args%5B'height'%5D) / 2%0A return dmenu_run_args%0A%0A%0Adef main(args):%0A dmenu_run_args = parse_dmenu_args(args)%0A return system((%22dmenu_run %7Bextra_args%7D -w %7Bwidth%7D -x %7Bx%7D -y %7By%7D%22%0A %22 -h %7Bheight%7D%22).format(**dmenu_run_args))%0A%0Aif __name__ == '__main__':%0A sys.exit(main(sys.argv))%0A
|
|
a737c7cd26450ac5dfdab23aea6902f53976c538
|
fix version in xformhistory for multiple xml versions
|
onadata/apps/fsforms/management/commands/fix_xml_version_in_xformhistory.py
|
onadata/apps/fsforms/management/commands/fix_xml_version_in_xformhistory.py
|
Python
| 0 |
@@ -0,0 +1,2097 @@
+import os%0Afrom onadata.settings.local_settings import XML_VERSION_MAX_ITER%0Afrom onadata.apps.fsforms.models import XformHistory%0Afrom django.core.management.base import BaseCommand%0Aimport re%0Aimport datetime%0A%0Adef check_version(instance, n):%0A for i in range(n, 0, -1):%0A p = re.compile(%22%22%22%3Cbind calculate=%22%5C'(.*)%5C'%22 nodeset=%22/(.*)/_version__00%7B0%7D%22 %22%22%22.format(i))%0A m = p.search(instance)%0A if m:%0A return m.group(1)%0A%0A %0Aclass Command(BaseCommand):%0A help = 'Fix FInstance version for multiple versions in xml'%0A%0A # def add_arguments(self, parser):%0A # parser.add_argument('--file', type=str)%0A%0A def handle(self, *args, **options):%0A batchsize = options.get(%22batchsize%22, 100)%0A stop = False%0A offset = 0%0A while stop is not True:%0A limit = offset + batchsize%0A xformhists = XformHistory.objects.all()%5Boffset:limit%5D%0A inst = list(xformhists)%0A if xformhists:%0A self.stdout.write(%22Updating instances from #%7B%7D to #%7B%7D%5Cn%22.format(%0A inst%5B0%5D.id,%0A inst%5B-1%5D.id))%0A %0A for xformhist in xformhists:%0A version = ''%0A n = XML_VERSION_MAX_ITER%0A xml = xformhist.xml%0A version = check_version(xml, n)%0A if version:%0A xformhist.version = version%0A%0A if not version:%0A p = re.compile(%22%22%22%3Cbind calculate=%22%5C'(.*)%5C'%22 nodeset=%22/(.*)/_version_%22 %22%22%22)%0A m = p.search(xml)%0A if m:%0A xformhist.version = m.group(1)%0A %0A else:%0A p1 = re.compile(%22%22%22%3Cbind calculate=%22%5C'(.*)%5C'%22 nodeset=%22/(.*)/__version__%22 %22%22%22)%0A m1 = p1.search(xml)%0A if m1:%0A xformhist.version = m1.group(1)%0A else:%0A stop = True%0A%0A offset += batchsize
|
|
3837329e0d49796cfe9eabd2aeb026c206c5c4d8
|
add admin ui for user upload record
|
corehq/apps/user_importer/admin.py
|
corehq/apps/user_importer/admin.py
|
Python
| 0 |
@@ -0,0 +1,855 @@
+import zipfile%0A%0Afrom io import BytesIO%0A%0Afrom django.contrib import admin%0Afrom django.http.response import HttpResponse%0A%0Afrom .models import UserUploadRecord%0A%0A%0Aclass UserUploadAdmin(admin.ModelAdmin):%0A list_display = ('domain', 'date_created')%0A list_filter = ('domain',)%0A ordering = ('-date_created',)%0A search_fields =('domain',)%0A actions = %5B'download_file'%5D%0A%0A def download_file(self, request, queryset):%0A export_file = BytesIO()%0A with zipfile.ZipFile(export_file, 'w') as zip_file:%0A for upload_record in queryset:%0A upload_file = upload_record.get_file()%0A zip_file.writestr(f'%7Bupload_record.task_id%7D.csv', upload_file.read())%0A export_file.seek(0)%0A return HttpResponse(export_file, content_type='application/zip')%0A%0Aadmin.site.register(UserUploadRecord, UserUploadAdmin)%0A
|
|
ba5a358ffefb5646a3911fafe2c394c9c52905f7
|
add import script for Horsham
|
polling_stations/apps/data_collection/management/commands/import_horsham.py
|
polling_stations/apps/data_collection/management/commands/import_horsham.py
|
Python
| 0 |
@@ -0,0 +1,343 @@
+from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter%0A%0Aclass Command(BaseXpressDemocracyClubCsvImporter):%0A council_id = 'E07000227'%0A addresses_name = 'Democracy_Club__04May2017.tsv'%0A stations_name = 'Democracy_Club__04May2017.tsv'%0A elections = %5B'local.west-sussex.2017-05-04'%5D%0A csv_delimiter = '%5Ct'%0A
|
|
3d4e3be7624f099f9b15c24a9161f474a733ebff
|
add a script for manual fixing of user profiles. Now I will fix the bug in the code…
|
scripts/20130625_fix_userprofile_data_being_unicode_insteadof_dict.py
|
scripts/20130625_fix_userprofile_data_being_unicode_insteadof_dict.py
|
Python
| 0 |
@@ -0,0 +1,293 @@
+%0Afrom oneflow.profiles.models import UserProfile%0Aimport ast%0A%0Afor p in UserProfile.objects.all():%0A if type(p.data) == type(u''):%0A p.data = %7B%7D%0A%0A if type(p.register_request_data) == type(u''):%0A p.register_request_data = ast.literal_eval(p.register_request_data)%0A%0A p.save()%0A
|
|
a3a48824b36ef62edaf128379f1baec5482166e7
|
Save error_message for resources (SAAS-982)
|
src/nodeconductor_saltstack/migrations/0005_resource_error_message.py
|
src/nodeconductor_saltstack/migrations/0005_resource_error_message.py
|
Python
| 0 |
@@ -0,0 +1,650 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('nodeconductor_saltstack', '0004_remove_useless_spl_fields'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='domain',%0A name='error_message',%0A field=models.TextField(blank=True),%0A preserve_default=True,%0A ),%0A migrations.AddField(%0A model_name='site',%0A name='error_message',%0A field=models.TextField(blank=True),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
bfb51cadc66f34a67686bef3b15e9197c9d0617b
|
Create ping_help.py
|
ping_help.py
|
ping_help.py
|
Python
| 0.000023 |
@@ -0,0 +1,237 @@
+import time%0Aimport subprocess%0Aimport os%0A%0Ahostname=raw_input('')%0A%0A#while 1:%0Aos.system(%22ping -c 10 -i 5 %22 + hostname + %22 %3E1.txt%22)%0Aos.system(%22awk -F'%5B= %5D' '%7Bprint $6,$10%7D' %3C 1.txt %3Efinal.txt%22)%0Aos.system(%22grep %5B0-9%5D final.txt %3Efinal1.txt%22)%0A%0A
|
|
dacffcb3e79877e1ea5e71d1a2e67bd4edd865bf
|
Add SettingOverrideModel that exposes a SettingOverrideDecorator to QML
|
plugins/Tools/PerObjectSettingsTool/SettingOverrideModel.py
|
plugins/Tools/PerObjectSettingsTool/SettingOverrideModel.py
|
Python
| 0 |
@@ -0,0 +1,2061 @@
+# Copyright (c) 2015 Ultimaker B.V.%0A# Uranium is released under the terms of the AGPLv3 or higher.%0A%0Afrom PyQt5.QtCore import Qt, pyqtSlot, QUrl%0A%0Afrom UM.Application import Application%0Afrom UM.Qt.ListModel import ListModel%0A%0Aclass SettingOverrideModel(ListModel):%0A KeyRole = Qt.UserRole + 1%0A LabelRole = Qt.UserRole + 2%0A DescriptionRole = Qt.UserRole + 3%0A ValueRole = Qt.UserRole + 4%0A TypeRole = Qt.UserRole + 5%0A UnitRole = Qt.UserRole + 6%0A ValidRole = Qt.UserRole + 7%0A%0A def __init__(self, decorator, parent = None):%0A super().__init__(parent)%0A%0A self._decorator = decorator%0A self._decorator.settingAdded.connect(self._onSettingsChanged)%0A self._decorator.settingRemoved.connect(self._onSettingsChanged)%0A self._decorator.settingValueChanged.connect(self._onSettingValueChanged)%0A self._onSettingsChanged()%0A%0A self.addRoleName(self.KeyRole, %22key%22)%0A self.addRoleName(self.LabelRole, %22label%22)%0A self.addRoleName(self.DescriptionRole, %22description%22)%0A self.addRoleName(self.ValueRole,%22value%22)%0A self.addRoleName(self.TypeRole, %22type%22)%0A self.addRoleName(self.UnitRole, %22unit%22)%0A self.addRoleName(self.ValidRole, %22valid%22)%0A%0A%0A def _onSettingsChanged(self):%0A self.clear()%0A%0A active_instance = Application.getInstance().getMachineManager().getActiveMachineInstance()%0A%0A for key, value in self._decorator.getAllSettings().items():%0A setting = active_instance.getSettingByKey(key)%0A if not setting:%0A continue%0A%0A self.appendItem(%7B%0A %22key%22: key,%0A %22label%22: setting.getLabel(),%0A %22description%22: setting.getDescription(),%0A %22value%22: value,%0A %22type%22: setting.getType(),%0A %22unit%22: setting.getUnit(),%0A %22valid%22: setting.validate()%0A %7D)%0A%0A def _onSettingValueChanged(self, key, value):%0A index = self.find(%22key%22, key)%0A if index != -1:%0A self.setProperty(index, %22value%22, value)%0A
|
|
3652f1c666f3bf482862727838f0b4bbc9fea5e9
|
fix bug 1076270 - add support for Windows 10
|
alembic/versions/17e83fdeb135_bug_1076270_support_windows_10.py
|
alembic/versions/17e83fdeb135_bug_1076270_support_windows_10.py
|
Python
| 0 |
@@ -0,0 +1,924 @@
+%22%22%22bug 1076270 - support windows 10%0A%0ARevision ID: 17e83fdeb135%0ARevises: 52dbc7357409%0ACreate Date: 2014-10-03 14:03:29.837940%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = '17e83fdeb135'%0Adown_revision = '52dbc7357409'%0A%0Afrom alembic import op%0Afrom socorro.lib import citexttype, jsontype, buildtype%0Afrom socorro.lib.migrations import fix_permissions, load_stored_proc%0A%0Aimport sqlalchemy as sa%0Afrom sqlalchemy import types%0Afrom sqlalchemy.dialects import postgresql%0Afrom sqlalchemy.sql import table, column%0A%0A%0Adef upgrade():%0A op.execute(%22%22%22%0A INSERT INTO os_versions%0A (major_version, minor_version, os_name, os_version_string)%0A VALUES (6, 4, 'Windows', 'Windows 10')%0A %22%22%22)%0A%0A%0Adef downgrade():%0A op.execute(%22%22%22%0A DELEE FROM os_versions%0A WHERE major_version = 6%0A AND minor_version = 4%0A AND os_name = 'Windows'%0A AND os_version_string = 'Windows 10'%0A %22%22%22)%0A
|
|
f5ada694fae30f15498c775e8c4aa14a08459251
|
Add slogan plugin
|
plugins/slogan.py
|
plugins/slogan.py
|
Python
| 0.000001 |
@@ -0,0 +1,586 @@
+import re%0Aimport requests%0Aimport urllib.parse%0A%0Aclass Plugin:%0A def __call__(self, bot):%0A bot.on_respond(r%22slogan(?:ise)? (.*)$%22, self.on_respond)%0A bot.on_help(%22slogan%22, self.on_help)%0A%0A def on_respond(self, bot, msg, reply):%0A url = %22http://www.sloganizer.net/en/outbound.php?slogan=%7B0%7D%22.format(urllib.parse.quote(msg%5B%22match%22%5D%5B0%5D))%0A headers = %7B %22User-Agent%22: %22SmartBot%22 %7D%0A%0A page = requests.get(url, headers=headers)%0A reply(re.sub(%22%3C.*?%3E%22, %22%22, page.text))%0A%0A def on_help(self, bot, msg, reply):%0A reply(%22Syntax: slogan%5Bise%5D %3Cthing%3E%22)%0A
|
|
779fb015913a17fcb8fb290515845e6b47c3ae50
|
Create the converter (with span-conversion functionality)
|
latex2markdown.py
|
latex2markdown.py
|
Python
| 0 |
@@ -0,0 +1,680 @@
+%22%22%22%0AA Very simple tool to convert latex documents to markdown documents%0A%22%22%22%0Aimport re%0A%0A%0Aspan_substitutions = %5B%0A (r'%5C%5Cemph%5C%7B(.+)%5C%7D', r'*%5C1*'),%0A (r'%5C%5Ctextbf%5C%7B(.+)%5C%7D', r'**%5C1**'),%0A (r'%5C%5Cverb;(.+);', r'%60%5C1%60'),%0A (r'%5C%5Cincludegraphics%5C%7B(.+)%5C%7D', r'!%5B%5D(%5C1)'),%0A %5D%0A%0Adef convert_span_elements(line):%0A %22%22%22 Converts all recognizable span elements into markdown%0A %22%22%22%0A for (f, r) in span_substitutions:%0A p = re.compile(f)%0A line = p.sub(r, line)%0A return line%0A%0A# This next bit is to test the conversion as it builds%0Afrom sys import stdin%0Aif __name__==%22__main__%22:%0A for line in stdin:%0A print(convert_span_elements(line),end='')%0A%0A
|
|
1ede6cd46c0fdde24a3e859e125a8c4929cd7170
|
Add test for no task
|
projects/tests/test_resources.py
|
projects/tests/test_resources.py
|
import sure
from mock import MagicMock
from django.contrib.auth.models import User
from tastypie.test import ResourceTestCase
from tools.tests import MockGithubMixin
from tasks.models import Tasks
from .. import models
from . import factories
class ProjectsResourceCase(MockGithubMixin, ResourceTestCase):
"""User projects resources case"""
def setUp(self):
MockGithubMixin.setUp(self)
ResourceTestCase.setUp(self)
self._mock_make_https()
self.user = User.objects.create_user(
'test', '[email protected]', 'test',
)
self.api_client.client.login(
username='test',
password='test',
)
self.url = '/api/v1/projects/project/'
def _mock_make_https(self):
"""Mock make_https"""
self._orig_make_https = models.make_https
models.make_https = MagicMock(return_value='')
def tearDown(self):
super(ProjectsResourceCase, self).tearDown()
models.make_https = self._orig_make_https
def test_read_list(self):
"""Test read list"""
User.github.get_user.return_value.get_repos.return_value =\
map(self._create_repo, range(10))
response = self.api_client.get('{}?fetch=true'.format(self.url))
response.status_code.should.be.equal(200)
self.deserialize(response)['meta']['total_count'].should.be.equal(10)
def test_read_self(self):
"""Test read self"""
project = factories.ProjectFactory.create(owner=self.user)
response = self.api_client.get('{}{}/'.format(self.url, project.name))
response.status_code.should.be.equal(200)
def test_read_other(self):
"""Test read other"""
project = factories.ProjectFactory.create()
response = self.api_client.get('{}{}/'.format(self.url, project.name))
response.status_code.should.be.equal(200)
def test_read_other_private(self):
"""Test read other private project"""
project = factories.ProjectFactory.create(is_private=True)
response = self.api_client.get('{}{}/'.format(self.url, project.name))
response.status_code.should.be.equal(401)
def test_update_is_enabled(self):
"""Test update is_enabled"""
project = factories.ProjectFactory.create(owner=self.user)
response = self.api_client.put(
'{}{}/'.format(self.url, project.name), data={
'is_enabled': True,
},
)
response.status_code.should.be.within([202, 204])
models.Project.objects.get().is_enabled.should.be.true
def test_not_update_name(self):
"""Test not update name"""
project = factories.ProjectFactory.create(
owner=self.user, name='test',
)
response = self.api_client.put(
'{}{}/'.format(self.url, project.name), data={
'name': 'nope',
},
)
response.status_code.should.be.within([202, 204])
models.Project.objects.get().name.should.be.equal('test')
def test_not_update_url(self):
"""Test not update url"""
project = factories.ProjectFactory.create(
owner=self.user, url='http://test.test',
)
response = self.api_client.put(
'{}{}/'.format(self.url, project.name), data={
'url': 'http://nope.nope/',
},
)
response.status_code.should.be.within([202, 204])
models.Project.objects.get().url.should.be.equal('http://test.test')
def test_get_enabled(self):
"""Test get enabled"""
factories.ProjectFactory.create_batch(
10, owner=self.user, is_enabled=True,
)
factories.ProjectFactory.create_batch(
30, owner=self.user, is_enabled=False,
)
response = self.api_client.get('{}'.format(self.url))
response.status_code.should.be.equal(200)
self.deserialize(response)['meta']['total_count'].should.be.equal(10)
def test_has_token_if_owner(self):
"""Test has token if owner"""
project = factories.ProjectFactory(owner=self.user)
response = self.api_client.get('{}{}/'.format(
self.url,
project.name,
))
response.status_code.should.be.equal(200)
self.deserialize(response)['token'].should.be.equal(project.token)
def test_blank_token_if_not_owner(self):
"""Test has blank token if owner"""
project = factories.ProjectFactory()
response = self.api_client.get('{}{}/'.format(
self.url,
project.name,
))
response.status_code.should.be.equal(200)
self.deserialize(response)['token'].should.be.equal('')
def test_regenerate_token(self):
"""Test regenerate token"""
project = factories.ProjectFactory(owner=self.user)
self.api_client.put('{}{}/'.format(
self.url,
project.name,
), data={'token': None})
updated = models.Project.objects.get(id=project.id)
updated.token.should_not.be.equal(project.token)
def test_attach_success_percent(self):
"""Test attach success percent"""
project = factories.ProjectFactory(owner=self.user)
response = self.api_client.get(
'{}{}/?with_success_percent=true'.format(self.url, project.name),
)
self.deserialize(response)['success_percents'].should.be.equal([])
def test_attach_last_task(self):
"""Test attach last task"""
project = factories.ProjectFactory(owner=self.user)
task_id = Tasks.save({
'project': project.name,
})
response = self.api_client.get(
'{}{}/?with_last_task=true'.format(self.url, project.name),
)
self.deserialize(response)['last_task']['_id'].should.be\
.equal(str(task_id))
|
Python
| 0.999172 |
@@ -5908,8 +5908,355 @@
sk_id))%0A
+%0A def test_attach_last_task_without_tasks(self):%0A %22%22%22Test attach last task without tasks%22%22%22%0A project = factories.ProjectFactory(owner=self.user)%0A response = self.api_client.get(%0A '%7B%7D%7B%7D/?with_last_task=true'.format(self.url, project.name),%0A )%0A self.deserialize(response)%5B'last_task'%5D.should.be.none%0A
|
af86a3ad67747ea42fe2ed6a40d582c10fce319e
|
fix error when open subreddit
|
rtv/subreddit.py
|
rtv/subreddit.py
|
import curses
import sys
import webbrowser
from requests.exceptions import HTTPError
from .errors import SubredditNameError
from .page import BasePage
from .submission import SubmissionPage
from .content import SubredditContent
from .utils import LoadScreen, text_input, display_message, Color, ESCAPE
class SubredditPage(BasePage):
    """Scrollable curses page that lists the submissions of a subreddit.

    Handles keyboard navigation, opening a submission or its link,
    prompting for a different subreddit, and refreshing the listing.
    """

    def __init__(self, stdscr, reddit, name):
        """
        Params:
            stdscr: the curses standard screen window
            reddit: an authenticated praw Reddit session
            name: subreddit name to load (e.g. 'python')
        """
        self.reddit = reddit
        self.name = name
        self.loader = LoadScreen(stdscr)

        content = SubredditContent.from_name(reddit, name, self.loader)
        super(SubredditPage, self).__init__(stdscr, content)

    def loop(self):
        "Main event loop: dispatch key presses until the user quits."
        self.draw()

        while True:
            cmd = self.stdscr.getch()

            if cmd in (curses.KEY_UP, ord('k')):
                self.move_cursor_up()
                self.clear_input_queue()

            elif cmd in (curses.KEY_DOWN, ord('j')):
                self.move_cursor_down()
                self.clear_input_queue()

            elif cmd in (curses.KEY_RIGHT, curses.KEY_ENTER, ord('l')):
                self.open_submission()
                self.draw()

            elif cmd == ord('o'):
                self.open_link()

            elif cmd == ord('/'):
                self.prompt_subreddit()
                self.draw()

            elif cmd in (curses.KEY_F5, ord('r')):
                self.refresh_content()
                self.draw()

            elif cmd == curses.KEY_RESIZE:
                self.draw()

            elif cmd == ord('q'):
                sys.exit()

            else:
                curses.beep()

    def refresh_content(self, name=None):
        """Reload the submission listing, optionally for a new subreddit.

        On an invalid name or an HTTP failure the current content is kept
        and an error message is flashed instead of crashing.
        """
        name = name or self.name

        try:
            self.content = SubredditContent.from_name(
                self.reddit, name, self.loader)
        except (SubredditNameError, HTTPError):
            display_message(self.stdscr, 'Invalid Subreddit')
        else:
            # Reset navigation only after a successful load.
            self.nav.page_index, self.nav.cursor_index = 0, 0
            self.nav.inverted = False
            self.name = name

    def prompt_subreddit(self):
        "Prompt the user for a subreddit name on the bottom line and load it."
        attr = curses.A_BOLD | Color.MAGENTA
        prompt = 'Enter Subreddit: /r/'
        n_rows, n_cols = self.stdscr.getmaxyx()
        self.stdscr.addstr(n_rows-1, 0, prompt, attr)
        self.stdscr.refresh()
        # BUG FIX: derwin() with only two arguments treats them as
        # (begin_y, begin_x). The text-input window must be an explicit
        # 1-row window spanning the rest of the bottom line:
        # derwin(nlines, ncols, begin_y, begin_x).
        window = self.stdscr.derwin(
            1, n_cols-len(prompt), n_rows-1, len(prompt))
        window.attrset(attr)
        out = text_input(window)
        if out is not None:
            self.refresh_content(name=out)

    def open_submission(self):
        "Select the current submission to view posts"
        submission = self.content.get(self.nav.absolute_index)['object']
        page = SubmissionPage(self.stdscr, self.reddit, submission=submission)
        page.loop()

    def open_link(self):
        "Open the currently selected submission's url in the web browser."
        url = self.content.get(self.nav.absolute_index)['url_full']
        webbrowser.open_new_tab(url)

    @staticmethod
    def draw_item(win, data, inverted=False):
        """Render one submission entry into the given sub-window.

        When the window is too small for the entry, rows outside the
        window are silently skipped; `inverted` shifts which rows are
        clipped (top vs bottom).
        """
        n_rows, n_cols = win.getmaxyx()
        n_cols -= 1  # Leave space for the cursor in the first column

        # Handle the case where the window is not large enough to fit the data.
        valid_rows = range(0, n_rows)
        offset = 0 if not inverted else -(data['n_rows'] - n_rows)

        n_title = len(data['split_title'])
        for row, text in enumerate(data['split_title'], start=offset):
            if row in valid_rows:
                attr = curses.A_BOLD
                win.addstr(row, 1, text, attr)

        row = n_title + offset
        if row in valid_rows:
            attr = curses.A_UNDERLINE | Color.BLUE
            text = '{url}'.format(**data)
            win.addnstr(row, 1, text, n_cols-1, attr)

        row = n_title + offset + 1
        if row in valid_rows:
            text = '{created} {comments} {score}'.format(**data)
            win.addnstr(row, 1, text, n_cols-1)

        row = n_title + offset + 2
        if row in valid_rows:
            text = '{author}'.format(**data)
            win.addnstr(row, 1, text, n_cols-1, curses.A_BOLD)
            text = ' {subreddit}'.format(**data)
            # Continue on the same row from the current cursor position.
            win.addnstr(text, n_cols - win.getyx()[1], Color.YELLOW)
|
Python
| 0.000005 |
@@ -2318,16 +2318,38 @@
.derwin(
+1, n_cols-len(prompt),
n_rows-1
@@ -4162,8 +4162,9 @@
.YELLOW)
+%0A
|
2fdabf544c75096efafe2d14988efa28619643ab
|
add scheme
|
app/scheme_mongodb.py
|
app/scheme_mongodb.py
|
Python
| 0.000032 |
@@ -0,0 +1,2794 @@
+import pymongo%0Aimport bson%0Afrom bson import json_util%0Aimport warnings%0Afrom cStringIO import StringIO%0Afrom pymongo import Connection, uri_parser%0Aimport bson.son as son%0Aimport json%0Aimport logging%0A%0Adef open(url=None, task=None):%0A #parses a mongodb uri and returns the database%0A #%22mongodb://localhost/test.in?query='%7B%22key%22: value%7D'%22%0A uri = url if url else %22mongodb://localhost/test.in%22%0A%0A #print 'uri: ' + uri%0A params = uri.split('?', 1)%0A uri = params%5B0%5D%0A uri_info = uri_parser.parse_uri(uri)%0A query = None%0A #TODO test flow from a query%0A #parse json to a dict = q_d%0A # %5E%5E this is where we use json_util.object_hook%0A #SON()%5B'query'%5D = q_d%5B'query'%5D%0A #for k,v in q_d.iteritems:%0A # if k not %22query%22:%0A # SON%5Bk%5D = v%0A options = %7B%7D%0A if len(params) %3E 1:%0A params = params%5B1%5D%0A list_of_params = params.split('&', 1)%0A for p in params:%0A name, json_obj = params.split('=')%0A if name == 'query':%0A query = son.SON(json.loads(json_obj, object_hook=json_util.object_hook))%0A else:%0A options%5Bname%5D = json_obj%0A%0A '''%0A query = son.SON()%0A li_q = json.loads(json_query)%0A for tupl in li_q:%0A if tupl%5B0%5D == %22$max%22 or tupl%5B0%5D == %22$min%22:%0A obj_id = bson.objectid.ObjectId(tupl%5B1%5D)%0A query%5Btupl%5B0%5D%5D = %7Bu'_id' : obj_id%7D%0A else:%0A query%5Btupl%5B0%5D%5D = tupl%5B1%5D%0A '''%0A if not query:%0A query = %7B%7D%0A%0A #go around: connect to the sonnection then choose db by %5B'dbname'%5D%0A with warnings.catch_warnings():%0A warnings.simplefilter(%22ignore%22)%0A connection = Connection(uri)%0A database_name = uri_info%5B'database'%5D%0A collection_name = uri_info%5B'collection'%5D%0A db = connection%5Bdatabase_name%5D%0A collection = db%5Bcollection_name%5D%0A%0A cursor = collection.find(query, None)%0A%0A wrapper = MongoWrapper(cursor)%0A return wrapper%0A #WRAPPED!%0A%0A%0Aclass MongoWrapper(object):%0A %22%22%22Want to wrap the cursor in an object 
that%0A supports the following operations: %22%22%22%0A%0A def __init__(self, cursor):%0A self.cursor = cursor%0A self.offset = 0%0A%0A def __iter__(self):%0A #most important method%0A for rec in self.cursor:%0A yield rec%0A%0A def __len__(self):%0A #may need to do this more dynamically (see lib/disco/comm.py ln 163)%0A return self.cursor.count()%0A%0A def close(self):%0A self.cursor.close()%0A%0A @property%0A def read(self, size=-1):%0A list_of_records = %5B%5D%0A if size %3E 0:%0A for i in range(size):%0A list_of_records.append(self.cursor.__iter__())%0A return list_of_records%0A%0A%0Adef input_stream(stream, size, url, params):%0A mon = open(url)%0A return mon%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.