commit
stringlengths 40
40
| subject
stringlengths 1
3.25k
| old_file
stringlengths 4
311
| new_file
stringlengths 4
311
| old_contents
stringlengths 0
26.3k
| lang
stringclasses 3
values | proba
float64 0
1
| diff
stringlengths 0
7.82k
|
---|---|---|---|---|---|---|---|
03ad7302f75ea5de0870c798ec70f1a1912288ca
|
Add main.py file Description for 'hello! hosvik'
|
src/main.py
|
src/main.py
|
Python
| 0.000001 |
@@ -0,0 +1,55 @@
+import sys%0Aprint(sys.platform);%0Aprint('Hello hosvik!')%0A
|
|
054c75ce1a63732be7a58ec1150e9f8aaff2aedb
|
Create test.py
|
plugins/test.py
|
plugins/test.py
|
Python
| 0.000005 |
@@ -0,0 +1,135 @@
[email protected]_handler(commands=%5B'test', 'toast'%5D)%0Adef send_test(message):%0A bot.send_message(message.chat.id, TEST_MSG.encode(%22utf-8%22))%0A
|
|
5c9ffaaa8e244bb9db627a0408258750cc0e81d6
|
Create ping.py
|
src/ping.py
|
src/ping.py
|
Python
| 0.000003 |
@@ -0,0 +1,6 @@
+nisse%0A
|
|
1473e0f4f1949349ef7212e0755fa8ffa6401cbe
|
Create process_htk_mlf_zh.py
|
process_htk_mlf_zh.py
|
process_htk_mlf_zh.py
|
Python
| 0 |
@@ -0,0 +1,1106 @@
+#!/usr/bin/env python%0A#%0A# This script reads in a HTK MLF format label file and converts the %0A# encoded contents to GBK encoding.%0A#%0A%0Aimport string, codecs%0Afin=open('vom_utt_wlab.mlf')%0Afout=codecs.open('vom_utt_wlab.gbk.mlf', encoding='gbk', mode='w')%0Awhile True:%0A sr=fin.readline()%0A if sr=='':break%0A sr=sr.strip()%0A if sr.endswith('.lab%22'):%0A print %3E%3Efout, sr%0A while True:%0A sr=(fin.readline()).strip()%0A if sr=='.':break%0A if sr.startswith('%5C%5C'):%0A lst=(sr.strip('%5C%5C')).split('%5C%5C') # get the list of octal representation of each byte%0A bins=bytearray()%0A for itm in lst:%0A val=0%0A for ii in range(3): # each octal number will have exactly 3 numbers, i.e. of the form %5Cnnn%0A val=val*8%0A val=val+int(itm%5Bii%5D)%0A bins.append(val)%0A print %3E%3Efout, bins.decode('gbk')%0A else:%0A print %3E%3Efout, sr%0A print %3E%3Efout, '.'%0A else:%0A print %3E%3Efout, sr%0Afin.close()%0Afout.close()%0A
|
|
c8c807cfcb4422edc0e2dbe3a4673a62fa37cbfa
|
Add extra migration triggered by updated django / parler (#501)
|
djangocms_blog/migrations/0037_auto_20190806_0743.py
|
djangocms_blog/migrations/0037_auto_20190806_0743.py
|
Python
| 0 |
@@ -0,0 +1,2240 @@
+# Generated by Django 2.1.11 on 2019-08-06 05:43%0A%0Afrom django.conf import settings%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0Aimport parler.fields%0Aimport taggit_autosuggest.managers%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('djangocms_blog', '0036_auto_20180913_1809'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterField(%0A model_name='authorentriesplugin',%0A name='authors',%0A field=models.ManyToManyField(limit_choices_to=%7B'djangocms_blog_post_author__publish': True%7D, to=settings.AUTH_USER_MODEL, verbose_name='authors'),%0A ),%0A migrations.AlterField(%0A model_name='blogcategorytranslation',%0A name='master',%0A field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogCategory'),%0A ),%0A migrations.AlterField(%0A model_name='blogconfigtranslation',%0A name='master',%0A field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='translations', to='djangocms_blog.BlogConfig'),%0A ),%0A migrations.AlterField(%0A model_name='latestpostsplugin',%0A name='tags',%0A field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='Show only the blog articles tagged with chosen tags.', related_name='djangocms_blog_latest_post', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='filter by tag'),%0A ),%0A migrations.AlterField(%0A model_name='post',%0A name='tags',%0A field=taggit_autosuggest.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', related_name='djangocms_blog_tags', through='taggit.TaggedItem', to='taggit.Tag', verbose_name='Tags'),%0A ),%0A migrations.AlterField(%0A model_name='posttranslation',%0A name='master',%0A field=parler.fields.TranslationsForeignKey(editable=False, null=True, on_delete=django.db.models.deletion.CASCADE, 
related_name='translations', to='djangocms_blog.Post'),%0A ),%0A %5D%0A
|
|
e082c803bf5ce31c4948d0d512e9ec0366cf0adc
|
Create politeusersbot.py
|
politeusersbot.py
|
politeusersbot.py
|
Python
| 0.00002 |
@@ -0,0 +1,1932 @@
+#Polite Users Bot created by Kooldawgstar%0A%0Aimport praw%0Afrom time import sleep%0Aimport random%0A%0AUSERNAME = %22USERNAME%22%0APASSWORD = %22PASSWORD%22%0ALIMIT = 100%0ARESPONSES = %5B%22Thanks for being a nice user and thanking people for help!%22,%0A %22Thank you for being a nice user and thanking people for help!%22,%0A %5D%0A%0Aresponded = set()%0A%0Ar = praw.Reddit(user_agent=%22Enter in Useragent here%22)%0Ar.login(USERNAME, PASSWORD)%0Asubreddit = r.get_subreddit(%22Polite_Users_Bot%22)%0A%0Adef meets_criteria(responded, comment):%0A #add whatever criteria/logic you want here%0A return (not str(comment.author) == USERNAME) and (not comment.id in responded) and (%22thanks%22 , %22thx%22 , %22thank you%22 , %22thank u%22 in comment.body.lower())%0A%0Adef generate_response(comment):%0A #generate whatever response you want, you can make it specific to a comment by checking for various conditions%0A return random.choice(RESPONSES)%0A%0Awhile True:%0A for comment in subreddit.get_comments(limit=LIMIT):%0A if meets_criteria(responded, comment):%0A print (comment.body)%0A print (comment.id)%0A print (str(comment.author))%0A while True: #continue to try responding to the comment until it works, unless something unknown occurs%0A try:%0A comment.reply(generate_response(comment))%0A print (%22Breaking out after responding, and adding to the list%22)%0A responded.add(comment.id)%0A break%0A except praw.errors.RateLimitExceeded:%0A print (%22Sleeping, rate limit :(%22)%0A sleep(10*60) #sleep for 10 minutes, that's the timer limit%0A except:%0A print (%22Some unknown error has occurred, bail out...%22)%0A break%0A print (%22---------------------------------------%5Cn%5Cn%22)%0A print (%22sleeping%22)%0A sleep(60) #sleep for a minute for new comments to show up%0A
|
|
94481f656690956b2a4eb5a1227948d24ba4cc05
|
Add actual command line python function (#7)
|
bin/CCDSingleEpochStile.py
|
bin/CCDSingleEpochStile.py
|
Python
| 0.00002 |
@@ -0,0 +1,120 @@
+#!/usr/bin/env python%0A%0Afrom stile.lsst.base_tasks import CCDSingleEpochStileTask%0A%0ACCDSingleEpochStileTask.parseAndRun()%0A
|
|
225d5232cca6bb42e39959b2330758225a748477
|
add little script to retrieve URLs to PS1-DR1 images
|
py/legacyanalysis/get-ps1-skycells.py
|
py/legacyanalysis/get-ps1-skycells.py
|
Python
| 0 |
@@ -0,0 +1,1397 @@
+import requests%0Afrom astrometry.util.fits import *%0Afrom astrometry.util.multiproc import *%0A%0Adef get_cell((skycell, subcell)):%0A url = 'http://ps1images.stsci.edu/cgi-bin/ps1filenames.py?skycell=%25i.%2503i' %25 (skycell, subcell)%0A print('Getting', url)%0A r = requests.get(url)%0A lines = r.text.split('%5Cn')%0A #assert(len(lines) == 6)%0A cols = 'projcell subcell ra dec filter mjd type filename shortname'%0A assert(lines%5B0%5D == cols)%0A lines = lines%5B1:%5D%0A lines = %5Bl.split() for l in lines%5D%0A T = fits_table()%0A types = dict(projcell=np.int16, subcell=np.int16, ra=np.float64, dec=np.float64, mjd=None)%0A types%5B'type'%5D =None%0A for i,col in enumerate(cols.split()):%0A tt = types.get(col, str)%0A if tt is None:%0A continue%0A vals = %5Bwords%5Bi%5D for words in lines%5D%0A #print('Values for', col, ':', vals)%0A # HACK -- base-10 parsing for integer subcell%0A if col == 'subcell':%0A vals = %5Bint(v, 10) for v in vals%5D%0A T.set(col, np.array(%5Btt(v) for v in vals%5D, dtype=tt))%0A return T%0A %0Amp = multiproc(8)%0A%0ATT = %5B%5D%0Afor skycell in range(635, 2643+1):%0A args = %5B%5D%0A for subcell in range(100):%0A args.append((skycell, subcell))%0A TTi = mp.map(get_cell, args)%0A%0A Ti = merge_tables(TTi)%0A Ti.writeto('ps1skycells-%25i.fits' %25 skycell)%0A TT.extend(TTi)%0AT = merge_tables(TT)%0AT.writeto('ps1skycells.fits')%0A %0A
|
|
0f5ecc42485d4f0e89fbe202b57a2e7735ea69cc
|
Create product_images.py
|
product_images.py
|
product_images.py
|
Python
| 0.000012 |
@@ -0,0 +1,320 @@
+from openerp.osv import osv, fields%0A%0Aclass product_template(osv.Model):%0A _inherit = 'product.template'%0A %0A _columns = %7B%0A 'x_secondpicture': fields.binary(%22Second Image%22,%0A help=%22This field holds the second image used as image for the product, limited to 1024x1024px.%22),%0A %7D%0Aproduct_template()%0A
|
|
216a51b21484307a38bc0d5147922db189b7ab62
|
Fix compatibility with Python 2.4
|
rbtools/commands/setup_repo.py
|
rbtools/commands/setup_repo.py
|
import os
from rbtools.commands import Command, CommandError, Option
from rbtools.utils.console import confirm
from rbtools.utils.filesystem import CONFIG_FILE
class SetupRepo(Command):
"""Configure a repository to point to a Review Board server.
Interactively creates the configuration file .reviewboardrc in the current
working directory.
The user is prompted for the Review Board server url if it's not supplied
as an option. Upon a successful server connection, an attempt is made to
match the local repository to a repository on the Review Board server.
If no match is found or if the user declines the match, the user is
prompted to choose from other repositories on the Review Board server.
If the client supports it, it attempts to guess the branch name on the
server.
"""
name = "setup-repo"
author = "The Review Board Project"
description = ("Configure a repository to point to a Review Board server "
"by generating the configuration file %s"
% CONFIG_FILE)
args = ""
option_list = [
Option("--server",
dest="server",
metavar="SERVER",
config_key="REVIEWBOARD_URL",
default=None,
help="specify a different Review Board server to use"),
Option("--username",
dest="username",
metavar="USERNAME",
config_key="USERNAME",
default=None,
help="user name to be supplied to the Review Board server"),
Option("--password",
dest="password",
metavar="PASSWORD",
config_key="PASSWORD",
default=None,
help="password to be supplied to the Review Board server"),
]
def prompt_rb_repository(self, tool_name, repository_info, api_root):
"""Interactively prompt to select a matching repository.
The user is prompted to choose a matching repository found on the
Review Board server.
"""
repositories = api_root.get_repositories()
# Go through each matching repo and prompt for a selection. If a
# selection is made, immediately return the selected repo.
try:
while True:
for repo in repositories:
is_match = (
tool_name == repo.tool and
repository_info.path in
(repo['path'], getattr(repo, 'mirror_path', '')))
if is_match:
question = (
"Use the %s repository '%s' (%s)?"
% (tool_name, repo['name'], repo['path']))
if confirm(question):
return repo
repositories = repositories.get_next()
except StopIteration:
pass
return None
def _get_output(self, config):
"""Returns a string output based on the the provided config."""
settings = []
for setting, value in config:
settings.append('%s = "%s"' % (setting, value))
settings.append('')
return '\n'.join(settings)
def generate_config_file(self, file_path, config):
"""Generates the config file in the current working directory."""
try:
outfile = open(file_path, "w")
output = self._get_output(config)
outfile.write(output)
outfile.close()
except IOError as e:
raise CommandError('I/O error generating config file (%s): %s'
% (e.errno, e.strerror))
print "Config written to %s" % file_path
def main(self, *args):
server = self.options.server
if not server:
server = raw_input('Enter the Review Board server URL: ')
repository_info, tool = self.initialize_scm_tool()
api_client, api_root = self.get_api(server)
self.setup_tool(tool, api_root=api_root)
selected_repo = self.prompt_rb_repository(
tool.name, repository_info, api_root)
if not selected_repo:
print ("No %s repository found or selected for %s. %s not created."
% (tool.name, server, CONFIG_FILE))
return
config = [
('REVIEWBOARD_URL', server),
('REPOSITORY', selected_repo['name'])
]
try:
branch = tool.get_current_branch()
config.append(('BRANCH', branch))
except NotImplementedError:
pass
outfile_path = os.path.join(os.getcwd(), CONFIG_FILE)
output = self._get_output(config)
if not os.path.exists(outfile_path):
question = ("Create '%s' with the following?\n\n%s\n"
% (outfile_path, output))
else:
question = ("'%s' exists. Overwrite with the following?\n\n%s\n"
% (outfile_path, output))
if not confirm(question):
return
self.generate_config_file(outfile_path, config)
|
Python
| 0 |
@@ -3573,19 +3573,17 @@
IOError
- as
+,
e:%0A
|
e81f6e01ac55723e015c4d7d9d8f61467378325a
|
Add autoincrement to ZUPC.id
|
migrations/versions/e187aca7c77a_zupc_id_autoincrement.py
|
migrations/versions/e187aca7c77a_zupc_id_autoincrement.py
|
Python
| 0.00001 |
@@ -0,0 +1,677 @@
+%22%22%22ZUPC.id autoincrement%0A%0ARevision ID: e187aca7c77a%0ARevises: ccd5b0142a76%0ACreate Date: 2019-10-21 14:01:10.406983%0A%0A%22%22%22%0A%0A# revision identifiers, used by Alembic.%0Arevision = 'e187aca7c77a'%0Adown_revision = '86b41c3dbd00'%0A%0Afrom alembic import op%0Afrom sqlalchemy.dialects import postgresql%0Afrom sqlalchemy.schema import Sequence, CreateSequence, DropSequence%0Aimport sqlalchemy as sa%0A%0A%0Adef upgrade():%0A op.execute('''%0A CREATE SEQUENCE ZUPC_id_seq;%0A ALTER TABLE %22ZUPC%22 ALTER COLUMN id SET DEFAULT nextval('ZUPC_id_seq');%0A ''')%0A%0A%0Adef downgrade():%0A op.execute('''%0A ALTER TABLE %22ZUPC%22 ALTER COLUMN id DROP DEFAULT;%0A DROP SEQUENCE ZUPC_id_seq%0A ''')%0A
|
|
cc8b3f8a7fb6af29f16d47e4e4caf56f17605325
|
Add command handler.
|
src/server/commandHandler.py
|
src/server/commandHandler.py
|
Python
| 0 |
@@ -0,0 +1,2021 @@
+from src.shared.encode import decodePosition%0A%0A%0Aclass CommandHandler(object):%0A def __init__(self, gameState, connectionManager):%0A self.gameState = gameState%0A self.connectionManager = connectionManager%0A%0A def broadcastMessage(self, *args, **kwargs):%0A self.connectionManager.broadcastMessage(*args, **kwargs)%0A%0A def sendMessage(self, *args, **kwargs):%0A self.connectionManager.sendMessage(*args, **kwargs)%0A%0A def createConnection(self, playerId):%0A playerX, playerY = self.gameState.getPos(playerId)%0A%0A self.sendMessage(playerId, %22your_id_is%22, %5BplayerId%5D)%0A self.broadcastMessage(%22new_obelisk%22, %5BplayerX, playerY%5D)%0A for otherId in self.gameState.positions:%0A # We already broadcast this one to everyone, including ourself.%0A if otherId == playerId:%0A continue%0A otherX, otherY = self.gameState.getPos(otherId)%0A self.sendMessage(%22new_obelisk%22, %5BotherId, otherX, otherY%5D)%0A%0A def removeConnection(self, playerId):%0A self.broadcastMessage(%22delete_obelisk%22, %5BplayerId%5D)%0A self.gameState.removePlayer(playerId)%0A%0A def stringReceived(self, playerId, data):%0A command = data.strip().lower()%0A%0A STEP_SIZE = 1.0%0A RELATIVE_MOVES = %7B%0A 'n': %5B 0.0, STEP_SIZE%5D,%0A 's': %5B 0.0, -STEP_SIZE%5D,%0A 'e': %5B STEP_SIZE, 0.0%5D,%0A 'w': %5B-STEP_SIZE, 0.0%5D,%0A %7D%0A%0A if command in RELATIVE_MOVES:%0A self.gameState.movePlayerBy(playerId,%0A RELATIVE_MOVES%5Bcommand%5D)%0A%0A else:%0A newPos = decodePosition(command)%0A if newPos is not None:%0A self.gameState.movePlayerTo(playerId, newPos)%0A%0A # TODO: Maybe only broadcast the new position if we handled a valid%0A # command? Else the position isn't changed....%0A playerX, playerY = self.gameState.getPos(playerId)%0A self.broadcastMessage(%22set_pos%22, %5BplayerId, playerX, myY%5D)%0A
|
|
be67baac2314408b295bddba3e5e4b2ca9bfd262
|
Add ffs.exceptions
|
ffs/exceptions.py
|
ffs/exceptions.py
|
Python
| 0.00354 |
@@ -0,0 +1,204 @@
+%22%22%22%0Affs.exceptions%0A%0ABase and definitions for all exceptions raised by FFS%0A%22%22%22%0Aclass Error(Exception):%0A %22Base Error class for FFS%22%0A%0Aclass DoesNotExistError(Error):%0A %22Something should have been here%22%0A
|
|
2737e1d46263eff554219a5fa5bad060b8f219d3
|
Add CLI script for scoring huk-a-buk.
|
score_hukabuk.py
|
score_hukabuk.py
|
Python
| 0 |
@@ -0,0 +1,2736 @@
+import json%0Aimport os%0Aimport time%0A%0A%0ADATA = %7B'turns': %7B%7D%7D%0A%0A%0Aclass Settings(object):%0A FILENAME = None%0A CURRENT_TURN = 0%0A NAME_CHOICES = None%0A%0A%0Adef set_filename():%0A filename = raw_input('Set the filename? ').strip()%0A if not filename:%0A filename = str(int(time.time()))%0A Settings.FILENAME = filename + '.json'%0A%0A%0Adef save_game():%0A with open(Settings.FILENAME, 'w') as fh:%0A json.dump(DATA, fh)%0A%0A%0Adef enter_names():%0A names = %7B%7D%0A while True:%0A name = raw_input('Enter name: ')%0A if name.strip() == '':%0A break%0A names%5Bname%5D = -5%0A DATA%5B'names'%5D = names%0A Settings.NAME_CHOICES = '%5Cn'.join(%5B%0A '%25d: %25s' %25 (i, name)%0A for i, name in enumerate(names.keys())%0A %5D)%0A save_game()%0A%0A%0Adef game_over():%0A game_over = raw_input('Is the game over? %5By/n%5D ')%0A return game_over.lower().strip() == 'y'%0A%0A%0Adef get_bidder():%0A actual_bidder = None%0A while actual_bidder is None:%0A print(Settings.NAME_CHOICES)%0A bidder = raw_input('Who won the bid? ')%0A try:%0A bidder = int(bidder)%0A actual_bidder = Settings.NAME_CHOICES%5Bbidder%5D%0A except:%0A if bidder in Settings.NAME_CHOICES:%0A actual_bidder = bidder%0A%0A return actual_bidder%0A%0A%0Adef get_bid():%0A actual_bid = None%0A while actual_bid is None:%0A bid = raw_input('Bid amount? 
')%0A try:%0A bid = int(bid)%0A if bid in (2, 3, 4, 5):%0A actual_bid = bid%0A except:%0A pass%0A%0A return actual_bid%0A%0A%0Adef get_points():%0A result = %7B%7D%0A print '=' * 60%0A print 'Scores for turn %25d:' %25 (Settings.CURRENT_TURN,)%0A for name in DATA%5B'names'%5D.keys():%0A msg = 'Score for %25r: ' %25 (name,)%0A actual_score = None%0A while actual_score is None:%0A score = raw_input(msg)%0A try:%0A score = int(score)%0A if score in (-5, 0, 1, 2, 3, 4, 5):%0A actual_score = score%0A except:%0A pass%0A%0A result%5Bname%5D = actual_score%0A DATA%5B'names'%5D%5Bname%5D += actual_score%0A%0A return result%0A%0A%0A%0Adef play_turn():%0A turn = DATA%5B'turns'%5D.setdefault(Settings.CURRENT_TURN, %7B%7D)%0A turn%5B'bidder'%5D = get_bidder()%0A turn%5B'bid'%5D = get_bid()%0A turn%5B'points'%5D = get_points()%0A%0A Settings.CURRENT_TURN += 1%0A save_game()%0A%0A%0Adef print_scores():%0A print '=' * 60%0A print 'Current scores:'%0A print '-' * 60%0A for name, score in DATA%5B'names'%5D.items():%0A print '%25r -%3E %25d' %25 (name, score)%0A print '=' * 60%0A%0A%0Adef play_game():%0A while not game_over():%0A print_scores()%0A play_turn()%0A%0A%0Adef main():%0A set_filename()%0A enter_names()%0A play_game()%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
bb5eb56cd20801ffaf73b3caa69cba6b931d26ac
|
set last_polled in Sugar importer.
|
mysite/search/tasks/trac_instances.py
|
mysite/search/tasks/trac_instances.py
|
import datetime
import logging
from celery.task import Task, PeriodicTask
from celery.registry import tasks
import celery.decorators
import mysite.search.models
import mysite.customs.bugtrackers.trac
### Twisted
class LookAtOneTwistedBug(Task):
def run(self, bug_id, **kwargs):
logger = self.get_logger(**kwargs)
logger.info("Was asked to look at bug %d in Twisted" % bug_id)
tb = mysite.customs.bugtrackers.trac.TracBug(
bug_id=bug_id,
BASE_URL='http://twistedmatrix.com/trac/')
bug_url = tb.as_bug_specific_url()
# If bug is already in our database, and we looked at
# it within the past day, skip the request.
try:
bug_obj = mysite.search.models.Bug.all_bugs.get(
canonical_bug_link=bug_url)
except mysite.search.models.Bug.MultipleObjectsReturned:
# delete all but the first
bug_objs = mysite.search.models.Bug.all_bugs.filter(
canonical_bug_link=bug_url)
bug_obj = bug_objs[0]
for stupid_dup in bug_objs[1:]:
stupid_dup.delete()
except mysite.search.models.Bug.DoesNotExist:
bug_obj = mysite.search.models.Bug(
canonical_bug_link=bug_url)
# Is that bug fresh enough?
if (datetime.datetime.now() - bug_obj.last_polled
) > datetime.timedelta(days=1):
logging.info("Refreshing bug %d from Twisted." %
bug_id)
# if the delta is greater than a day, refresh it.
data = tb.as_data_dict_for_bug_object()
for key in data:
value = data[key]
setattr(bug_obj, key, value)
# And the project...
if not bug_obj.project_id:
project, _ = mysite.search.models.Project.objects.get_or_create(name='Twisted')
bug_obj.project = project
bug_obj.save()
logging.info("Finished with %d from Twisted." % bug_id)
class LearnAboutNewEasyTwistedBugs(PeriodicTask):
run_every = datetime.timedelta(days=1)
def run(self, **kwargs):
logger = self.get_logger(**kwargs)
logger.info('Started to learn about new Twisted easy bugs.')
for bug_id in mysite.customs.bugtrackers.trac.csv_url2list_of_bug_ids(
mysite.customs.bugtrackers.trac.csv_of_bugs(
url='http://twistedmatrix.com/trac/query?status=new&status=assigned&status=reopened&format=csv&keywords=%7Eeasy&order=priority')):
task = LookAtOneTwistedBug()
task.delay(bug_id=bug_id)
logger.info('Finished grabbing the list of Twisted easy bugs.')
class RefreshAllTwistedEasyBugs(PeriodicTask):
run_every = datetime.timedelta(days=1)
def run(self, **kwargs):
logger = self.get_logger(**kwargs)
logger.info("Starting refreshing all easy Twisted bugs.")
for bug in mysite.search.models.Bug.all_bugs.filter(
canonical_bug_link__contains=
'http://twistedmatrix.com/trac/'):
tb = mysite.customs.bugtrackers.trac.TracBug.from_url(
bug.canonical_bug_link)
task = LookAtOneTwistedBug()
task.delay(bug_id=tb.bug_id)
tasks.register(LearnAboutNewEasyTwistedBugs)
tasks.register(LookAtOneTwistedBug)
tasks.register(RefreshAllTwistedEasyBugs)
@celery.decorators.task
def look_at_sugar_labs_bug(bug_id):
logging.info("Looking at bug %d in Sugar Labs" % bug_id)
tb = mysite.customs.bugtrackers.trac.TracBug(
bug_id=bug_id,
BASE_URL='http://bugs.sugarlabs.org/')
bug_url = tb.as_bug_specific_url()
try:
bug = mysite.search.models.Bug.all_bugs.get(
canonical_bug_link=bug_url)
except mysite.search.models.Bug.DoesNotExist:
bug = mysite.search.models.Bug(canonical_bug_link = bug_url)
# Hopefully, the bug is so fresh it needs no refreshing.
if bug.data_is_more_fresh_than_one_day():
logging.info("Bug is fresh. Doing nothing!")
return # sweet
# Okay, fine, we need to actually refresh it.
logging.info("Refreshing bug %d from Sugar Labs." %
bug_id)
data = tb.as_data_dict_for_bug_object()
for key in data:
value = data[key]
setattr(bug, key, value)
# And the project...
project_from_tb, _ = mysite.search.models.Project.objects.get_or_create(name=tb.component)
if bug.project_id != project_from_tb.id:
bug.project = project_from_tb
bug.save()
logging.info("Finished with %d from Sugar Labs." % bug_id)
@celery.decorators.periodic_task(run_every=datetime.timedelta(days=1))
def learn_about_new_sugar_easy_bugs():
logging.info('Started to learn about new Sugar Labs easy bugs.')
for bug_id in mysite.customs.bugtrackers.trac.csv_url2list_of_bug_ids(
mysite.customs.bugtrackers.trac.csv_of_bugs(
url='http://bugs.sugarlabs.org/query?status=new&status=assigned&status=reopened&format=csv&keywords=%7Esugar-love&order=priority')):
look_at_sugar_labs_bug.delay(bug_id=bug_id)
logging.info('Finished grabbing the list of Sugar Labs easy bugs.')
@celery.decorators.periodic_task(run_every=datetime.timedelta(days=1))
def refresh_all_sugar_easy_bugs():
logging.info("Starting refreshing all Sugar bugs.")
for bug in mysite.search.models.Bug.all_bugs.filter(
canonical_bug_link__contains=
'http://bugs.sugarlabs.org/'):
tb = mysite.customs.bugtrackers.trac.TracBug.from_url(
bug.canonical_bug_link)
look_at_sugar_labs_bug.delay(bug_id=tb.bug_id)
|
Python
| 0 |
@@ -4556,16 +4556,65 @@
from_tb%0A
+ bug.last_polled = datetime.datetime.utcnow()%0A
bug.
|
7f64a56a17fc6d73da4ac2987d42931885925db0
|
Create server.py
|
server/server.py
|
server/server.py
|
Python
| 0.000001 |
@@ -0,0 +1,170 @@
+import http.server%0Aimport socketserver%0APORT = 80%0AHandler = http.server.SimpleHTTPRequestHandler%0Ahttpd = socketserver.TCPServer((%22%22, PORT), Handler)%0Ahttpd.serve_forever()%0A
|
|
be6997772bd7e39dd1f68d96b3d52a82372ad216
|
update migartions
|
tracpro/supervisors/migrations/0002_auto_20141102_2231.py
|
tracpro/supervisors/migrations/0002_auto_20141102_2231.py
|
Python
| 0 |
@@ -0,0 +1,749 @@
+# -*- coding: utf-8 -*-%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import models, migrations%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('supervisors', '0001_initial'),%0A %5D%0A%0A operations = %5B%0A migrations.AlterModelOptions(%0A name='supervisor',%0A options=%7B'verbose_name': 'Supervisor', 'verbose_name_plural': 'Supervisors'%7D,%0A ),%0A migrations.AlterField(%0A model_name='supervisor',%0A name='region',%0A field=models.CharField(help_text='The name of the Region or State this supervisor belongs to, this should map to the Contact group on RapidPro', max_length=64, verbose_name='Region'),%0A preserve_default=True,%0A ),%0A %5D%0A
|
|
0ccca70cf289fb219768d1a124cacf11396a0ecc
|
Add files via upload
|
src/pque.py
|
src/pque.py
|
Python
| 0 |
@@ -0,0 +1,2044 @@
+class Pque(object):%0A %22%22%22make as priority queue priority scale is 0 through -99%0A 0 has greatest priority with ties being first come first pop%22%22%22%0A def __init__(self):%0A self.next_node = None%0A self.priority = 0%0A self.value = None%0A self.tail = None%0A self.head = None%0A self.size = 0%0A %0A def insert(self,value , priority = -99):%0A %22%22%22 inserts a value into the que defalt priority is -99%22%22%22%0A new_pque = Pque()%0A new_pque.priority = priority%0A if self.size is 0:%0A self.head = new_pque%0A self.tail = new_pque%0A else:%0A current_node = self.head%0A pre_node = None%0A for x in range(self.size - 1):%0A if new_pque.priority %3E current_node.priority:%0A if current_node is self.head:%0A new_pque.next_node = self.head%0A self.head = new_pque%0A break%0A else:%0A pre_node.next_node = new_pque%0A new_pque.next_node = current.node%0A break%0A if current_node is self.tail:%0A self.tail.next_node = new_pque%0A self.tail = new_pque%0A break%0A else:%0A pre_node = current_node%0A current_node = current_node.next_node%0A self.size += 1%0A new_pque.value = value%0A def peek(self):%0A %22%22%22returns the data in the head of the pque with out removing it%22%22%22%0A if self.head is None:%0A raise IndexError ('que is empty')%0A return slef.head.value%0A %0A def pop(self):%0A %22%22%22returns the data in the head of pque and removes it %22%22%22%0A if self.head is None:%0A raise IndexError ('que is empty')%0A temp_val = self.head.value%0A self.head = self.head.next_node%0A self.size -= 1%0A return temp_val%0A %0A %0A %0A %0A
|
|
06235d5913cd5eb54d3767f6a7cf60acb1966b39
|
Create prettyrpc.py
|
prettyrpc.py
|
prettyrpc.py
|
Python
| 0 |
@@ -0,0 +1,325 @@
+#!/usr/bin/env python%0A# -*- coding: utf-8 -*-%0A%0Afrom xmlrpclib import ServerProxy%0A%0Aclass PrettyProxy(object):%0A def __init__(self, *args, **kwargs):%0A self._real_proxy = ServerProxy(*args, **kwargs)%0A%0A def __getattr__(self, name):%0A return lambda *args, **kwargs: getattr(self._real_proxy, name)(args, kwargs)%0A
|
|
38faa038cbc7b8cedbb2dc13c2760f2a270a5f1a
|
Create problem-5.py
|
problem-5.py
|
problem-5.py
|
Python
| 0.000911 |
@@ -0,0 +1,287 @@
+n = 0%0Awhile True:%0A n += 1%0A divisible_list = %5B%5D%0A for i in range(1,21):%0A is_divisible = (n %25 i == 0)%0A if is_divisible:%0A divisible_list.append(is_divisible)%0A else:%0A break%0A %0A if len(divisible_list) == 20:%0A break%0A%0Aprint(n)%0A
|
|
d326391f6412afb54ee05a02b3b11e075f703765
|
fix value < 0 or higher than max. closes #941
|
kivy/uix/progressbar.py
|
kivy/uix/progressbar.py
|
'''
Progress Bar
============
.. versionadded:: 1.0.8
.. image:: images/progressbar.jpg
:align: right
The :class:`ProgressBar` widget is used to visualize progress of some task.
Only horizontal mode is supported, vertical mode is not available yet.
The progress bar has no interactive elements, It is a display-only widget.
To use it, simply assign a value to indicate the current progress::
from kivy.uix.progressbar import ProgressBar
pb = ProgressBar(max=1000)
# this will update the graphics automatically (75% done):
pb.value = 750
'''
__all__ = ('ProgressBar', )
from kivy.uix.widget import Widget
from kivy.properties import NumericProperty, AliasProperty
class ProgressBar(Widget):
'''Class for creating a Progress bar widget.
See module documentation for more details.
'''
value = NumericProperty(0.)
'''Current value used for the slider.
:data:`value` is a :class:`~kivy.properties.NumericProperty`, default to 0.
'''
def get_norm_value(self):
d = self.max
if d == 0:
return 0
return self.value / float(d)
def set_norm_value(self, value):
self.value = value * self.max
value_normalized = AliasProperty(get_norm_value, set_norm_value,
bind=('value', 'max'))
'''Normalized value inside the 0-max to 0-1 range::
>>> pb = ProgressBar(value=50, max=100)
>>> pb.value
50
>>> slider.value_normalized
0.5
:data:`value_normalized` is an :class:`~kivy.properties.AliasProperty`.
'''
max = NumericProperty(100.)
'''Maximum value allowed for :data:`value`.
:data:`max` is a :class:`~kivy.properties.NumericProperty`, default to 100.
'''
if __name__ == '__main__':
from kivy.base import runTouchApp
runTouchApp(ProgressBar(value=50))
|
Python
| 0.000019 |
@@ -829,34 +829,386 @@
-value = NumericProperty(0.
+def __init__(self, **kwargs):%0A self._value = 0.%0A super(ProgressBar, self).__init__(**kwargs)%0A%0A def _get_value(self):%0A return self._value%0A%0A def _set_value(self, value):%0A value = max(0, min(self.max, value))%0A if value != self._value:%0A self._value = value%0A return True%0A%0A value = AliasProperty(_get_value, _set_value
)%0A
@@ -1288,39 +1288,37 @@
kivy.properties.
-Numeric
+Alias
Property%60, defau
@@ -1316,21 +1316,229 @@
y%60,
-default to 0.
+than returns the%0A value of the progressbar. If the value is %3C 0 or %3E :data:%60max%60, it will be%0A normalized to thoses boundaries.%0A%0A .. versionchanged:: 1.5.2%0A The value is now limited between 0 to :data:%60max%60
%0A
@@ -1743,24 +1743,25 @@
* self.max%0A
+%0A
value_no
|
5e2e5eed760fdc40d474e511662cf7c22b1ea29b
|
add usbwatch.py
|
usbwatch.py
|
usbwatch.py
|
Python
| 0.000003 |
@@ -0,0 +1,2657 @@
+#!/usr/bin/env python3%0A# usbwatch.py - monitor addition/removal of USB devices%0A#%0A#%0A%0Aimport pyudev%0A%0Aclass UsbDevice:%0A @staticmethod%0A def fromUdevDevice(udev):%0A attr = lambda name: udev.attributes.asstring(name)%0A try:%0A try:%0A manufacturer = attr('manufacturer')%0A except KeyError:%0A manufacturer = None%0A return UsbDevice( udev.device_path,%0A ( attr('busnum') + '-' + attr('devpath'),%0A attr('idVendor') + %22:%22 + attr('idProduct'),%0A manufacturer,%0A attr('product')%0A )%0A )%0A except KeyError:%0A return None%0A%0A def __init__(self, devicePath, info):%0A self.path = devicePath%0A self.bus, self.id, self.manufacturer, self.product = info%0A%0A def __repr__(self):%0A return %22UsbDevice(%25s, %25s)%22 %25 (self.path, (self.bus, self.id, self.manufacturer, self.product))%0A%0A def __str__(self):%0A return %22%25s (%25s): %25s, %25s%22 %25 (self.id, self.bus, self.manufacturer, self.product)%0A%0A%0A# UsbWatcher monitors the connection status of USB devices.%0A# It remembers the devices which are %22known%22 to be connected to the system.%0Aclass UsbWatcher:%0A def __init__(self):%0A self.ctx = pyudev.Context()%0A self.mon = pyudev.Monitor.from_netlink(self.ctx)%0A self.mon.filter_by('usb')%0A self.knowns = dict()%0A%0A # Query the currently connected USB devices%0A # Forcefully updates the list of %22known%22 devices%0A def poll(self):%0A old_knowns = self.knowns%0A self.knowns = dict()%0A%0A for udev in self.ctx.list_devices(subsystem=%22usb%22):%0A dev = UsbDevice.fromUdevDevice(udev)%0A if dev is not None:%0A self.knowns%5Budev.device_path%5D = dev%0A if udev.device_path in old_knowns:%0A old_knowns.pop(udev.device_path)%0A else:%0A self.onAdd(dev)%0A%0A for path, dev in old_knowns.items():%0A self.onRemove(dev)%0A%0A # Monitor newly added devices. 
Any devices connected beforehand are ignored%0A def watch(self):%0A for action, udev in iter(self.mon):%0A if action == 'add':%0A dev = UsbDevice.fromUdevDevice(udev)%0A if dev is not None:%0A self.knowns%5Budev.device_path%5D = dev%0A self.onAdd(dev)%0A elif action == 'remove':%0A if udev.device_path in self.knowns:%0A dev = self.knowns.pop(udev.device_path)%0A self.onRemove(dev)%0A%0A # Called upon a device is added to the system%0A # Override this%0A def onAdd(self,dev):%0A print(%22add %25s %22 %25 str(dev))%0A%0A # Called upon a device is removed from the system%0A # Override this%0A def onRemove(self,dev):%0A print(%22remove %25s %22 %25 str(dev))%0A%0Aif __name__ == %22__main__%22:%0A try:%0A wat = UsbWatcher()%0A wat.poll()%0A wat.watch()%0A except KeyboardInterrupt:%0A pass%0A
|
|
9c2be5533dc14443a67ed22c34e2f059992e43cb
|
Create camera.py
|
Camera/camera.py
|
Camera/camera.py
|
Python
| 0.000002 |
@@ -0,0 +1,336 @@
+from SimpleCV import Camera%0A# Initialize the camera%0Acam = Camera()%0A# Loop to continuously get images%0Awhile True:%0A # Get Image from camera%0A img = cam.getImage()%0A # Make image black and white%0A img = img.binarize()%0A # Draw the text %22Hello World%22 on image%0A img.drawText(%22Hello World!%22)%0A # Show the image%0A img.show()%0A
|
|
6014dab06ed2275c5703ab9f9e63272656733c69
|
Add retrieve_all_pages util method from mtp-cashbook
|
moj_utils/rest.py
|
moj_utils/rest.py
|
Python
| 0 |
@@ -0,0 +1,754 @@
+from django.conf import settings%0A%0A%0Adef retrieve_all_pages(api_endpoint, **kwargs):%0A %22%22%22%0A Some MTP apis are paginated, this method loads all pages into a single results list%0A :param api_endpoint: slumber callable, e.g. %60%5Bapi_client%5D.cashbook.transactions.locked.get%60%0A :param kwargs: additional arguments to pass into api callable%0A %22%22%22%0A loaded_results = %5B%5D%0A%0A offset = 0%0A while True:%0A response = api_endpoint(limit=settings.REQUEST_PAGE_SIZE, offset=offset,%0A **kwargs)%0A count = response.get('count', 0)%0A loaded_results += response.get('results', %5B%5D)%0A if len(loaded_results) %3E= count:%0A break%0A offset += settings.REQUEST_PAGE_SIZE%0A%0A return loaded_results%0A
|
|
8d10e0e2db81023cb435b047f5c1da793e4b992e
|
Add python/matplotlib_.py
|
python/matplotlib_.py
|
python/matplotlib_.py
|
Python
| 0.00352 |
@@ -0,0 +1,929 @@
+# matplotlib_.py%0D%0A%0D%0A# Imports%0D%0Afrom matplotlib import ticker%0D%0A%0D%0A# label_axis%0D%0Adef label_axis(ax, x_or_y, axis_labels, flip, **props):%0D%0A axis_ticks = range(0, len(axis_labels))%0D%0A axis = getattr(ax, '%25saxis' %25 x_or_y)%0D%0A axis.set_major_locator(ticker.FixedLocator(axis_ticks))%0D%0A axis.set_minor_locator(ticker.NullLocator())%0D%0A axis.set_major_formatter(ticker.FixedFormatter(axis_labels))%0D%0A axis.set_minor_formatter(ticker.NullFormatter())%0D%0A lim = (-0.5, len(axis_labels) - 0.5)%0D%0A if flip:%0D%0A lim = lim%5B::-1%5D%0D%0A set_lim = getattr(ax, 'set_%25slim' %25 x_or_y)%0D%0A set_lim(*lim)%0D%0A if props:%0D%0A plt.setp(axis.get_majorticklabels(), **props)%0D%0A%0D%0A# label_xaxis%0D%0Adef label_xaxis(ax, xaxis_labels, flip=False, **props):%0D%0A label_axis(ax, 'x', xaxis_labels, flip, **props)%0D%0A%0D%0A# label_yaxis%0D%0Adef label_yaxis(ax, yaxis_labels, flip=False, **props):%0D%0A label_axis(ax, 'y', yaxis_labels, flip, **props)%0D%0A
|
|
326ef75042fc1d3eeeb6834fd5ff80a2bd1a2be1
|
Add incoreect_regex.py solution
|
HackerRank/PYTHON/Errors_and_Exceptions/incoreect_regex.py
|
HackerRank/PYTHON/Errors_and_Exceptions/incoreect_regex.py
|
Python
| 0.000002 |
@@ -0,0 +1,209 @@
+#!/usr/bin/env python3%0Aimport re%0A%0Aif __name__ == '__main__':%0A for _ in range(int(input())):%0A try:%0A re.compile(input())%0A print('True')%0A except:%0A print('False')%0A
|
|
c675fe2a82733ef210bf287df277f8ae956a4295
|
Add beginning of main script
|
rarbg-get.py
|
rarbg-get.py
|
Python
| 0.000005 |
@@ -0,0 +1,409 @@
+#!env /usr/bin/python3%0A%0Aimport sys%0Aimport urllib.parse%0Aimport urllib.request%0A%0Adef main():%0A search = sys.argv%5B1%5D%0A url = 'http://rarbg.to/torrents.php?order=seeders&by=DESC&search='%0A url = url + search%0A print(url)%0A req = urllib.request.Request(url, headers=%7B'User-Agent' : %22Magic Browser%22%7D)%0A resp = urllib.request.urlopen(req)%0A respData = resp.read()%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
480ae590ea1116fdbb5c6601d7466408f274c433
|
Implement for GNOME activateAutoLoginCommand
|
src/nrvr/el/gnome.py
|
src/nrvr/el/gnome.py
|
Python
| 0.000001 |
@@ -0,0 +1,1505 @@
+#!/usr/bin/python%0A%0A%22%22%22nrvr.el.gnome - Manipulate Enterprise Linux GNOME%0A%0AClasses provided by this module include%0A* Gnome%0A%0ATo be improved as needed.%0A%0AIdea and first implementation - Leo Baschy %3Csrguiwiz12 AT nrvr DOT com%3E%0A%0APublic repository - https://github.com/srguiwiz/nrvr-commander%0A%0ACopyright (c) Nirvana Research 2006-2013.%0AModified BSD License%22%22%22%0A%0Aimport re%0A%0Aclass Gnome():%0A %22%22%22Utilities for manipulating a Gnome installation.%22%22%22%0A%0A @classmethod%0A def activateAutoLoginCommand(cls, username=None):%0A %22%22%22Build command to activate auto-login into GNOME.%0A %0A username%0A defaults to None, which effects deactivateAutoLoginCommand.%0A %0A Return command to activate auto-login into GNOME.%22%22%22%0A command = cls.deactivateAutoLoginCommand()%0A if username:%0A username = re.escape(username) # precaution%0A command += r%22 ; sed -i -e '/%5E%5C%5Bdaemon%5C%5D/ a %5CAutomaticLoginEnable=true%5CnAutomaticLogin=%22 + username + r%22' /etc/gdm/custom.conf%22%0A return command%0A %0A @classmethod%0A def deactivateAutoLoginCommand(cls):%0A %22%22%22Build command to deactivate auto-login into GNOME.%0A %0A Return command to deactivate auto-login into GNOME.%22%22%22%0A return r%22sed -i -e '/%5E%5Cs*AutomaticLoginEnable%5Cs*=/ d' -e '/%5E%5Cs*AutomaticLogin%5Cs*=/ d' /etc/gdm/custom.conf%22%0A%0Aif __name__ == %22__main__%22:%0A print Gnome.activateAutoLoginCommand(%22joe%22)%0A print Gnome.deactivateAutoLoginCommand()%0A print Gnome.activateAutoLoginCommand()%0A
|
|
6affa7946bafc418423c8e1857c6f2b55066c31a
|
this will find all the primes below a given number
|
generateprimes.py
|
generateprimes.py
|
Python
| 0.999869 |
@@ -0,0 +1,750 @@
+#!/usr/bin/env python 3.1%0A#doesn't work for numbers less than 4 %0Afrom math import sqrt %0Adef nextprime(number): %0A%09%22This function will find the smallest prime larger than the current number%22%0A%09potential= number%0A%09j=2%0A%09while j%3C=sqrt(potential):%0A %09if potential%25j:%0A%09%09%09j=j+1%0A%09%09else:%0A%09%09%09#print %22not prime%22%0A%09%09%09potential=potential +1%0A%09%09%09j=2%0A%09%09%09continue%0A%09#print potential%0A%09return potential %0Anumber =4 %0Arawstop = raw_input('I want to find all the prime numbers below ')%0Astop = int(rawstop)%0Aprint 2%0Aprint 3%0Awhile number %3C stop:%0A%09number = nextprime(number) %0A%09if number%3Cstop: %0A%09%09print number %0A%09number= number +1 #would changing the one to a two allow me to skip testing even numbers? yes, now its faster %0A%09continue %0A%09number = nextprime(number)%0Aprint %22done%22%0A
|
|
6d12624e094ec58118d39c4340438c4a814d404f
|
add wildcard, this is just a string contain problem
|
wildcard.py
|
wildcard.py
|
Python
| 0.000001 |
@@ -0,0 +1,2938 @@
+class Solution:%0A%0A # @param s, an input string%0A%0A # @param p, a pattern string%0A%0A # @return a boolean%0A def shrink(self, pattern):%0A shrinked = %5B%5D%0A i = 0%0A while i %3C len(pattern):%0A stars = 0%0A questions = 0%0A while i %3C len(pattern) and pattern%5Bi%5D in %5B'*', '?'%5D:%0A if pattern%5Bi%5D == '*':%0A stars += 1%0A else:%0A questions += 1%0A i += 1%0A if stars == 0:%0A if questions %3E 0:%0A shrinked.extend(%5B'?'%5D * questions)%0A else:%0A shrinked.append(('*', questions))%0A%0A if i %3C len(pattern):%0A shrinked.append(pattern%5Bi%5D)%0A i += 1%0A return shrinked%0A%0A def compress_string_score(self, string_score, pattern_list):%0A compressed = %5B%5D%0A i = 0%0A while i %3C len(string_score):%0A p = pattern_list%5Bstring_score%5Bi%5D%5D%0A compressed.append(p)%0A repeat = 0%0A while p != '?' and i %3C len(string_score)-1 and%5C%0A string_score%5Bi + 1%5D == string_score%5Bi%5D:%0A repeat += 1%0A i += 1%0A if repeat:%0A compressed.append(('*', repeat))%0A i += 1%0A return compressed%0A%0A def isMatch(self, s, p):%0A%0A pl = self.shrink(p)%0A string_score = %5B%5D%0A cursor = 0%0A for c in s:%0A try:%0A while cursor %3C len(pl) and isinstance(pl%5Bcursor%5D, tuple):%0A cursor += 1%0A if cursor %3E= len(pl):%0A # pattern exhausted, while string exists%0A break%0A%0A # cursor is not a star%0A if c == pl%5Bcursor%5D or pl%5Bcursor%5D == '?':%0A string_score.append(cursor)%0A # move on until meets with an alphabetic%0A cursor += 1%0A else:%0A if string_score:%0A string_score.append(string_score%5B-1%5D)%0A else:%0A return False%0A except:%0A print %22%25s: %25s vs %25s: %25s%22 %25 (s, c, pl, cursor)%0A print string_score%0A raise%0A compressed = self.compress_string_score(string_score, pl)%0A print %22%25s %25s vs %25s%22 %25 (string_score, compressed, pl)%0A for c_single, p_single in zip(compressed, pl):%0A if c_single != p_single:%0A if isinstance(c_single, tuple) and isinstance(p_single, tuple)%5C%0A and c_single%5B1%5D %3E 
p_single%5B1%5D:%0A continue%0A else:%0A return False%0A return True%0A%0Aso = Solution()%0Als = %5B%22aa%22, %22aa%22, %22aaa%22, %22aa%22, %22aa%22, %22ab%22, %22aab%22, %22axxbxxycxxde%22%5D%0Alp = %5B%22a%22, %22aa%22, %22aa%22, %22*%22, %22a*%22, %22?*%22, %22c*a*b%22, %22a**?*??b???c?d*?*e%22%5D%0Afor s,p in zip(ls, lp):%0A line = %22%25s, %25s -%3E %25s%22 %25 (s, p, so.isMatch(s, p))%0A print line%0A
|
|
00c86aff808ecc5b6f015da5977265cfa76826bb
|
add fixtures that start related worker for tests
|
livewatch/tests/conftest.py
|
livewatch/tests/conftest.py
|
Python
| 0 |
@@ -0,0 +1,1587 @@
+import pytest%0Aimport time%0A%0Aimport django_rq%0Afrom celery.signals import worker_ready%0A%0Afrom .celery import celery%0A%0A%0AWORKER_READY = list()%0A%0A%0A@worker_ready.connect%0Adef on_worker_ready(**kwargs):%0A %22%22%22Called when the Celery worker thread is ready to do work.%0A This is to avoid race conditions since everything is in one python process.%0A %22%22%22%0A WORKER_READY.append(True)%0A%0A%[email protected]_fixture%0Adef celery_worker(request):%0A %22%22%22Fixture starting a celery worker in background%22%22%22%0A%0A from multiprocessing import Process%0A%0A celery_args = %5B'-C', '-q', '-c', '1', '-P', 'solo', '--without-gossip'%5D%0A proc = Process(target=lambda: celery.worker_main(celery_args))%0A%0A def cleanup():%0A proc.terminate()%0A%0A request.addfinalizer(cleanup)%0A proc.start()%0A%0A # Wait for worker to finish initializing to avoid a race condition I've been experiencing.%0A for i in range(5):%0A if WORKER_READY:%0A break%0A time.sleep(1)%0A%0A yield proc%0A%0A proc.terminate()%0A%0A time.sleep(1)%0A%0A%[email protected]_fixture%0Adef rq_worker(request):%0A %22%22%22Fixture starting a rq worker in background%22%22%22%0A%0A from multiprocessing import Process%0A%0A def _proc_target(env):%0A import os%0A os.environ.update(env)%0A worker = django_rq.get_worker()%0A worker.work()%0A%0A proc = Process(target=_proc_target, kwargs=%7B%0A 'env': %7B'DJANGO_SETTINGS_MODULE': 'livewatch.tests.settings'%7D%0A %7D)%0A%0A def cleanup():%0A proc.terminate()%0A%0A request.addfinalizer(cleanup)%0A proc.start()%0A%0A time.sleep(1)%0A%0A yield proc%0A%0A proc.terminate()%0A%0A time.sleep(1)%0A
|
|
275cddfa56501868787abeef10fc515102ffd11d
|
make setup.py find all packages, now in src
|
python/setup.py
|
python/setup.py
|
Python
| 0 |
@@ -0,0 +1,232 @@
+from distutils.core import setup%0A%0Afrom setuptools import find_packages%0A%0Asetup(name='fancontrol',%0A version='0.1.0',%0A modules=%5B'fancontrol'%5D,%0A packages=find_packages(where=%22src%22),%0A package_dir=%7B%22%22: %22src%22%7D,%0A )%0A
|
|
9c5de3b667a8e98b0304fb64e30113f551b33404
|
Create getTwitterData.py
|
getTwitterData.py
|
getTwitterData.py
|
Python
| 0.000001 |
@@ -0,0 +1,1212 @@
+from tweepy import Stream%0Afrom tweepy import OAuthHandler%0Afrom tweepy.streaming import StreamListener%0Aimport time%0A%0Ackey = 'dNATh8K9vGwlOSR2phVzaB9fh'%0Acsecret = 'LmBKfyfoZmK1uIu577yFR9jYkVDRC95CXcKZQBZ8jWx9qdS4Vt'%0Aatoken = '2165798475-nuQBGrTDeCgXTOneasqSFZLd3SppqAJDmXNq09V'%0Aasecret = 'FOVzgXM0NJO2lHFydFCiOXCZdkhHlYBkmPNsWbRhLk8xd'%0A%0Aclass Listener(StreamListener):%0A def on_data(self, data):%0A try:%0A #print data%0A tweet = data.split(',%22text%22:%22')%5B1%5D.split('%22,%22source')%5B0%5D%0A #print tweet%0A %0A #saveThis = str(time.time())+'::'+tweet%0A saveFile = open('twitterData.txt','a')%0A saveFile.write(tweet)%0A saveFile.write('%5Cn')%0A saveFile.close()%0A%0A except BaseException as e:%0A print ('failed ondata'), str(e)%0A time.sleep(5)%0A %0A %0A def on_error(self, status):%0A print (status)%0A %0A#to authorize %0Aauth = OAuthHandler(ckey, csecret)%0Aauth.set_access_token(atoken, asecret)%0AtwitterStream= Stream(auth, Listener())%0AtwitterStream.filter(track=%5B'Apple'%5D)%0A%0Astart_time = time.clock()%0Awhile True:%0A if time.clock() - start_time %3E 5:%0A break %0AtwitterStream.disconnect()%0A%0A
|
|
e84640c5c67759be3de1a934d974c250d7b73a0c
|
Split kernels into their own name space
|
scikits/statsmodels/sandbox/kernel.py
|
scikits/statsmodels/sandbox/kernel.py
|
Python
| 0.999848 |
@@ -0,0 +1,1596 @@
+# -*- coding: utf-8 -*-%0A%22%22%22%0AThis models contains the Kernels for Kernel smoothing.%0A%0AHopefully in the future they may be reused/extended for other kernel based method%0A%22%22%22%0Aclass Kernel(object):%0A %22%22%22%0A Generic 1D Kernel object.%0A Can be constructed by selecting a standard named Kernel,%0A or providing a lambda expression and domain.%0A The domain allows some algorithms to run faster for finite domain kernels.%0A %22%22%22%0A # MC: Not sure how this will look in the end - or even still exist.%0A # Main purpose of this is to allow custom kernels and to allow speed up%0A # from finite support.%0A%0A def __init__(self, shape, h = 1.0, domain = None):%0A %22%22%22%0A shape should be a lambda taking and returning numeric type.%0A%0A For sanity it should always return positive or zero.%0A %22%22%22%0A self.domain = domain%0A # TODO: Add checking code that shape is valid%0A self._shape = shape%0A self.h = h%0A%0A def evaluate(self, xs, ys, x):%0A # TODO: make filtering more efficient%0A filtered = %5B(xx,yy) for xx,yy in zip(xs,ys) if (xx-x)/self.h %3E= self.domain%5B0%5D and (xx-x)/self.h %3C= self.domain%5B1%5D%5D%0A if len(filtered) %3E 0:%0A xs,ys = zip(*filtered)%0A w = np.sum(%5Bself((xx-x)/self.h) for xx in xs%5D)%0A v = np.sum(%5Byy*self((xx-x)/self.h) for xx, yy in zip(xs,ys)%5D)%0A return v/w%0A else:%0A return 0%0A%0A def __call__(self, x):%0A return self._shape(x)%0A%0Aclass Gaussian(Kernel):%0A def __init__(self, h=1.0):%0A self.h = h%0A self._shape = lambda x: np.exp(-x**2/2.0)%0A%0A
|
|
632056eef0666808d16740f434a305d0c8995132
|
Create magooshScraper.py
|
magooshScraper.py
|
magooshScraper.py
|
Python
| 0.000001 |
@@ -0,0 +1,1560 @@
+import scrapy%0Afrom bs4 import BeautifulSoup%0A%0Aclass magooshSpider(scrapy.Spider):%0A%09name = 'magoosh'%0A%09start_urls = %5B'http://gre.magoosh.com/login'%5D%0A%0A%09def parse(self, response):%0A%09%09return scrapy.FormRequest.from_response(%0A%09%09%09response,%0A%09%09%09'''%0A%09%09%09Replace the fake text below with your own registered%0A%09%09%09email and password on http://gre.magoosh.com:%0A%09%09%09'''%0A%09%09%09formdata=%7B'session%5Blogin%5D': '[email protected]', 'session%5Bpassword%5D': 'somepassword'%7D,%0A%09%09%09callback=self.after_login%0A%09%09)%0A%0A%09def after_login(self, response):%0A%09%09if 'Dashboard' in response.body:%0A%09%09%09self.logger.info('Logged in successfully!')%0A%0A%09%09return scrapy.Request('http://gre.magoosh.com/lessons',%0A%09%09%09callback=self.lessonsPage_loaded)%0A%0A%09def lessonsPage_loaded(self, response):%0A%09%09self.logger.info('Lessons page opened.')%0A%09%09soup = BeautifulSoup(response.body)%0A%09%09for categ in soup.find_all('h2'):%0A%09%09%09# Set the Subject name to crawl%0A%09%09%09# In this example, Maths section is scraped.%0A%09%09%09if 'Math' in categ:%0A%09%09%09%09self.logger.info('Math section found.')%0A%09%09%09%09cgparent = categ.parent.parent%0A%09%09%09%09for vu in cgparent.find_all('a'):%0A%09%09%09%09%09link = str(vu.get('href'))%0A%09%09%09%09%09if '/lessons/' in link:%0A%09%09%09%09%09%09s = 'http://gre.magoosh.com' + str(link) + %22%5Cn%22%0A%09%09%09%09%09%09req = scrapy.Request(s, callback=self.videoPage_loaded)%0A%09%09%09%09%09%09yield req%0A%09%09return%0A%0A%09def videoPage_loaded(self, response):%0A%09%09self.logger.info('Fetching video...')%0A%09%09soup = BeautifulSoup(response.body)%0A%09%09for div in soup.find_all('div'):%0A%09%09%09if div.get('data-file'):%0A%09%09%09%09vl = div.get('data-file')%0A%09%09%09%09f = open('scrapedVideoLinks.txt', 'a')%0A%09%09%09%09f.write(str(vl) + '%5Cn')%0A%09%09%09%09f.close()%0A
|
|
dbfc033fdfaad5820765a41766a5342831f3c4f9
|
add util script to dump twitter oauth tokens
|
scripts/remove_twuser_oauth.py
|
scripts/remove_twuser_oauth.py
|
Python
| 0 |
@@ -0,0 +1,973 @@
+%22%22%22Remove a twitter user's oauth tokens and reload iembot%22%22%22%0Afrom __future__ import print_function%0Aimport json%0Aimport sys%0A%0Aimport psycopg2%0Aimport requests%0A%0A%0Adef main(argv):%0A %22%22%22Run for a given username%22%22%22%0A screen_name = argv%5B1%5D%0A settings = json.load(open(%22../settings.json%22))%0A pgconn = psycopg2.connect(database=settings%5B'databaserw'%5D%5B'openfire'%5D,%0A user=settings%5B'databaserw'%5D%5B'user'%5D,%0A host=settings%5B'databaserw'%5D%5B'host'%5D)%0A cursor = pgconn.cursor()%0A cursor.execute(%22%22%22%0A DELETE from iembot_twitter_oauth where screen_name = %25s%0A %22%22%22, (screen_name, ))%0A print((%22Removed %25s entries from the database for screen name '%25s'%22%0A ) %25 (cursor.rowcount, screen_name))%0A cursor.close()%0A pgconn.commit()%0A%0A uri = %22http://iembot:9003/reload%22%0A req = requests.get(uri, timeout=30)%0A print(%22reloading iembot %25s%22 %25 (repr(req.content), ))%0A%0A%0Aif __name__ == '__main__':%0A main(sys.argv)%0A
|
|
77aa24bbea447d8684614f0d089320d134412710
|
Test ini-configured app.
|
test_app.py
|
test_app.py
|
Python
| 0 |
@@ -0,0 +1,173 @@
+from flask import Flask%0Afrom flask.ext.iniconfig import INIConfig%0A%0A%0Aapp = Flask(__name__)%0AINIConfig(app)%0A%0A%0Awith app.app_context():%0A app.config.from_inifile('settings.ini')%0A
|
|
74bde8878aa9b336046374ce75fc4c7bc63eaba7
|
add test for VampSimpleHost
|
tests/test_vamp_simple_host.py
|
tests/test_vamp_simple_host.py
|
Python
| 0.000001 |
@@ -0,0 +1,917 @@
+#! /usr/bin/env python%0A%0Afrom unit_timeside import unittest, TestRunner%0Afrom timeside.decoder.file import FileDecoder%0Afrom timeside.core import get_processor%0Afrom timeside import _WITH_VAMP%0Afrom timeside.tools.test_samples import samples%0A%0A%[email protected](not _WITH_VAMP, 'vamp-simple-host library is not available')%0Aclass TestVampsimpleHost(unittest.TestCase):%0A%0A def setUp(self):%0A self.analyzer = get_processor('vamp_simple_host')()%0A%0A def testOnC4_scale(self):%0A %22runs on C4_scale%22%0A self.source = samples%5B%22C4_scale.wav%22%5D%0A%0A def tearDown(self):%0A decoder = FileDecoder(self.source)%0A (decoder %7C self.analyzer).run()%0A results = self.analyzer.results%0A print results.keys()%0A #print results%0A #print results.to_yaml()%0A #print results.to_json()%0A #print results.to_xml()%0A%0Aif __name__ == '__main__':%0A unittest.main(testRunner=TestRunner())%0A
|
|
0d596f8c7148c2ac13c2b64be09ca1e20719cdb9
|
add dumper of flowpaths to shapefile
|
scripts/util/dump_flowpaths.py
|
scripts/util/dump_flowpaths.py
|
Python
| 0 |
@@ -0,0 +1,520 @@
+%22%22%22Dump flowpaths to a shapefile.%22%22%22%0A%0Afrom geopandas import read_postgis%0Afrom pyiem.util import get_dbconn%0A%0A%0Adef main():%0A %22%22%22Go Main Go.%22%22%22%0A pgconn = get_dbconn('idep')%0A df = read_postgis(%22%22%22%0A SELECT f.fpath, f.huc_12, ST_Transform(f.geom, 4326) as geo from%0A flowpaths f, huc12 h WHERE h.scenario = 0 and f.scenario = 0%0A and h.huc_12 = f.huc_12 and h.states ~* 'IA'%0A %22%22%22, pgconn, index_col=None, geom_col='geo')%0A df.to_file(%22ia_flowpaths.shp%22)%0A%0A%0Aif __name__ == '__main__':%0A main()%0A
|
|
f7768b10df84a4b3bb784ee1d449e380b93d88bb
|
add a simple scan example
|
data/scan_example.py
|
data/scan_example.py
|
Python
| 0.000001 |
@@ -0,0 +1,1667 @@
+import numpy%0Aimport theano%0A%0Afrom theano import tensor%0A%0A%0A# some numbers%0An_steps = 10%0An_samples = 5%0Adim = 10%0Ainput_dim = 20%0Aoutput_dim = 2%0A%0A%0A# one step function that will be used by scan%0Adef oneStep(x_t, h_tm1, W_x, W_h, W_o):%0A%0A h_t = tensor.tanh(tensor.dot(x_t, W_x) +%0A tensor.dot(h_tm1, W_h))%0A o_t = tensor.dot(h_t, W_o)%0A%0A return h_t, o_t%0A%0A# spawn theano tensor variable, our symbolic input%0A# a 3D tensor (n_steps, n_samples, dim)%0Ax = tensor.tensor3(dtype='float32')%0A%0A# initial state of our rnn%0Ainit_state = tensor.alloc(0., n_samples, dim)%0A%0A# create parameters that we will use,%0A# note that, parameters are theano shared variables%0A%0A# parameters for input to hidden states%0AW_x_ = numpy.random.randn(input_dim, dim).astype('float32')%0AW_x = theano.shared(W_x_)%0A%0A# parameters for hidden state transition%0AW_h_ = numpy.random.randn(dim, dim).astype('float32')%0AW_h = theano.shared(W_h_)%0A%0A# parameters from hidden state to output%0AW_o_ = numpy.random.randn(dim, output_dim).astype('float32')%0AW_o = theano.shared(W_o_)%0A%0A# scan function%0A(%5Bh_vals, o_vals%5D, updates) = theano.scan(%0A fn=oneStep,%0A sequences=%5Bx%5D,%0A outputs_info=%5Binit_state, None%5D,%0A non_sequences=%5BW_x, W_h, W_o%5D,%0A n_steps=n_steps,%0A strict=True)%0A%0A# let us now compile a function to get the output%0Af = theano.function(%5Bx%5D, %5Bh_vals, o_vals%5D)%0A%0A# now we will call the compiled function with actual input%0Aactual_input = numpy.random.randn(%0A n_steps, n_samples, input_dim).astype('float32')%0Ah_vals_, o_vals_ = f(actual_input)%0A%0A# print the shapes%0Aprint 'shape of input :', actual_input.shape%0Aprint 'shape of h_vals:', h_vals_.shape%0Aprint 'shape of o_vals:', o_vals_.shape%0A
|
|
9737f8b1551adb5d3be62b1922de27d867ac2b24
|
Add forwarding script for build-bisect.py.
|
build/build-bisect.py
|
build/build-bisect.py
|
Python
| 0.000002 |
@@ -0,0 +1,324 @@
+#!/usr/bin/python%0A# Copyright (c) 2010 The Chromium Authors. All rights reserved.%0A# Use of this source code is governed by a BSD-style license that can be%0A# found in the LICENSE file.%0A%0Aimport sys%0A%0Aprint %22This script has been moved to tools/bisect-builds.py.%22%0Aprint %22Please update any docs you're working from!%22%0A%0Asys.exit(1)%0A
|
|
4c2663939008285c395ee5959c38fab280f43e58
|
Create 03.PracticeCharsAndStrings.py
|
TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Exercises/03.PracticeCharsAndStrings.py
|
TechnologiesFundamentals/ProgrammingFundamentals/DataTypesAndVariables-Exercises/03.PracticeCharsAndStrings.py
|
Python
| 0 |
@@ -0,0 +1,75 @@
+print(input())%0Aprint(input())%0Aprint(input())%0Aprint(input())%0Aprint(input())%0A
|
|
782e4da9d04c656b3e5290269a4f06328ee5d508
|
add file
|
main.py
|
main.py
|
Python
| 0.000002 |
@@ -0,0 +1,168 @@
+import numpy as np%[email protected]%0Adef F(n):%0A return 1./np.sqrt(5.)*(((1.+np.sqrt(5))/2.)**n-((1.-np.sqrt(5))/2.)**n)%0An = np.arange(10)%0AF = F(n)%0Anp.savetxt(%22F.txt%22, F)%0A%0A
|
|
60f13bdfb97e83ac1bf2f72e3eec2e2c2b88cbb3
|
add tests for potential density computation
|
biff/tests/test_bfe.py
|
biff/tests/test_bfe.py
|
Python
| 0.000001 |
@@ -0,0 +1,1045 @@
+# coding: utf-8%0A%0Afrom __future__ import division, print_function%0A%0A__author__ = %22adrn %[email protected]%3E%22%0A%0A# Third-party%0Aimport astropy.units as u%0Afrom astropy.constants import G as _G%0AG = _G.decompose(%5Bu.kpc,u.Myr,u.Msun%5D).value%0Aimport numpy as np%0A%0A# Project%0Afrom .._bfe import density%0A%0A# Check that we get A000=1. for putting in hernquist density%0Adef hernquist_density(xyz, M, r_s):%0A xyz = np.atleast_2d(xyz)%0A r = np.sqrt(np.sum(xyz**2, axis=-1))%0A return M/(2*np.pi) * r_s / (r * (r+r_s)**3)%0A%0Adef hernquist_potential(xyz, M, r_s):%0A xyz = np.atleast_2d(xyz)%0A r = np.sqrt(np.sum(xyz**2, axis=-1))%0A return -G*M / (r + r_s)%0A%0Adef test_hernquist():%0A nmax = 6%0A lmax = 2%0A%0A Anlm = np.zeros((nmax+1,lmax+1,lmax+1))%0A Anlm%5B0,0,0%5D = 1.%0A%0A M = 1E10%0A r_s = 3.5%0A%0A nbins = 128%0A rr = np.linspace(0.1,10.,nbins)%0A xyz = np.zeros((nbins,3))%0A xyz%5B:,0%5D = rr%0A%0A bfe_dens = density(xyz, M, r_s, Anlm, nmax, lmax)%0A true_dens = hernquist_density(xyz, M, r_s)%0A np.testing.assert_allclose(bfe_dens, true_dens)%0A%0A
|
|
1e808aa70882cd30cd0ac7a567d12efde99b5e61
|
Create runserver.py
|
runserver.py
|
runserver.py
|
Python
| 0.000002 |
@@ -0,0 +1,47 @@
+from ucwa.http import app%0A%0Aapp.run(debug=True)%0A
|
|
98eb8c1bb013106108e239c7bc8b6961a2f321cd
|
Allow debug mode from the CLI
|
blaze/server/spider.py
|
blaze/server/spider.py
|
#!/usr/bin/env python
from __future__ import absolute_import
import os
import sys
import argparse
import yaml
from odo import resource
from odo.utils import ignoring
from .server import Server, DEFAULT_PORT
__all__ = 'spider', 'from_yaml'
def _spider(resource_path, ignore, followlinks, hidden):
resources = {}
for filename in (os.path.join(resource_path, x)
for x in os.listdir(resource_path)):
basename = os.path.basename(filename)
if (basename.startswith(os.curdir) and not hidden or
os.path.islink(filename) and not followlinks):
continue
if os.path.isdir(filename):
new_resources = _spider(filename, ignore=ignore,
followlinks=followlinks, hidden=hidden)
if new_resources:
resources[basename] = new_resources
else:
with ignoring(*ignore):
resources[basename] = resource(filename)
return resources
def spider(path, ignore=(ValueError, NotImplementedError), followlinks=True,
hidden=False):
"""Traverse a directory and call ``odo.resource`` on its contentso
Parameters
----------
path : str
Path to a directory of resources to load
ignore : tuple of Exception, optional
Ignore these exceptions when calling resource
followlinks : bool, optional
Follow symbolic links
hidden : bool, optional
Load hidden files
Returns
-------
dict
Possibly nested dictionary of containing basenames mapping to resources
"""
return {
os.path.basename(path): _spider(path, ignore=ignore,
followlinks=followlinks,
hidden=hidden)
}
def from_yaml(path, ignore=(ValueError, NotImplementedError), followlinks=True,
hidden=False):
"""Construct a dictionary of resources from a YAML specification.
Parameters
----------
path : str
Path to a YAML specification of resources to load
ignore : tuple of Exception, optional
Ignore these exceptions when calling resource
followlinks : bool, optional
Follow symbolic links
hidden : bool, optional
Load hidden files
Returns
-------
dict
A dictionary mapping top level keys in a YAML file to resources.
See Also
--------
spider : Traverse a directory tree for resources
"""
resources = {}
for name, info in yaml.load(path.read()).items():
if 'source' not in info:
raise ValueError('source key not found for data source named %r' %
name)
source = info['source']
if os.path.isdir(source):
resources[name] = spider(os.path.expanduser(source),
ignore=ignore,
followlinks=followlinks,
hidden=hidden)
else:
resources[name] = resource(source, dshape=info.get('dshape'))
return resources
def _parse_args():
p = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
p.add_argument('path', type=argparse.FileType('r'), nargs='?',
default=sys.stdin,
help='A YAML file specifying the resources to load')
p.add_argument('-p', '--port', type=int, default=DEFAULT_PORT,
help='Port number')
p.add_argument('-H', '--host', type=str, default='127.0.0.1',
help='Host name. Use 0.0.0.0 to listen on all public IPs')
p.add_argument('-l', '--follow-links', action='store_true',
help='Follow links when listing files')
p.add_argument('-e', '--ignored-exception', nargs='*',
default=['Exception'],
help='Exceptions to ignore when calling resource on a file')
p.add_argument('-d', '--hidden', action='store_true',
help='Call resource on hidden files')
return p.parse_args()
def _main():
args = _parse_args()
ignore = tuple(getattr(__builtins__, e) for e in args.ignored_exception)
resources = from_yaml(args.path,
ignore=ignore,
followlinks=args.follow_links,
hidden=args.hidden)
Server(resources).run(host=args.host, port=args.port)
if __name__ == '__main__':
_main()
|
Python
| 0.000001 |
@@ -4086,24 +4086,145 @@
den files')%0A
+ p.add_argument('-D', '--debug', action='store_true',%0A help='Start the Flask server in debug mode')%0A
return p
@@ -4591,16 +4591,34 @@
rgs.port
+, debug=args.debug
)%0A%0A%0Aif _
|
38b1353f42454905b0bc1b1e79e510a7df9db1cf
|
Clear memcached.
|
tools/run-dev.py
|
tools/run-dev.py
|
#!/usr/bin/env python
import optparse
import subprocess
import signal
import traceback
import sys
import os
from twisted.internet import reactor
from twisted.web import proxy, server, resource
# Monkey-patch twisted.web.http to avoid request.finish exceptions
# https://trac.zulip.net/ticket/1728
from twisted.web.http import Request
orig_finish = Request.finish
def patched_finish(self):
    """Finish the request unless the client has already disconnected.

    Calling ``Request.finish`` on a dead connection raises; see
    https://trac.zulip.net/ticket/1728.
    """
    if self._disconnected:
        return
    orig_finish(self)
Request.finish = patched_finish
if 'posix' in os.name and os.geteuid() == 0:
raise RuntimeError("run-dev.py should not be run as root.")
parser = optparse.OptionParser(r"""
Starts the app listening on localhost, for local development.
This script launches the Django and Tornado servers, then runs a reverse proxy
which serves to both of them. After it's all up and running, browse to
http://localhost:9991/
Note that, while runserver and runtornado have the usual auto-restarting
behavior, the reverse proxy itself does *not* automatically restart on changes
to this file.
""")
parser.add_option('--test',
action='store_true', dest='test',
help='Use the testing database and ports')
parser.add_option('--interface',
action='store', dest='interface',
default='127.0.0.1', help='Set the IP or hostname for the proxy to listen on')
(options, args) = parser.parse_args()
base_port = 9991
if options.test:
base_port = 9981
settings_module = "zproject.test_settings"
else:
settings_module = "zproject.settings"
manage_args = ['--settings=%s' % (settings_module,)]
os.environ['DJANGO_SETTINGS_MODULE'] = settings_module
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
proxy_port = base_port
django_port = base_port+1
tornado_port = base_port+2
webpack_port = base_port+3
os.chdir(os.path.join(os.path.dirname(__file__), '..'))
# Clean up stale .pyc files etc.
subprocess.check_call('./tools/clean-repo')
# Set up a new process group, so that we can later kill run{server,tornado}
# and all of the processes they spawn.
os.setpgrp()
# Pass --nostatic because we configure static serving ourselves in
# zulip/urls.py.
cmds = [['./tools/compile-handlebars-templates', 'forever'],
['python', 'manage.py', 'rundjango'] +
manage_args + ['localhost:%d' % (django_port,)],
['python', '-u', 'manage.py', 'runtornado'] +
manage_args + ['localhost:%d' % (tornado_port,)],
['./tools/run-dev-queue-processors'] + manage_args,
['env', 'PGHOST=localhost', # Force password authentication using .pgpass
'./puppet/zulip/files/postgresql/process_fts_updates']]
if options.test:
# Webpack doesn't support 2 copies running on the same system, so
# in order to support running the Casper tests while a Zulip
# development server is running, we use webpack in production mode
# for the Casper tests.
subprocess.check_call('./tools/webpack')
else:
cmds += [['./tools/webpack', '--watch', '--port', str(webpack_port)]]
for cmd in cmds:
subprocess.Popen(cmd)
class Resource(resource.Resource):
    """Reverse-proxy router for the dev server.

    Dispatches each incoming request to the Tornado, webpack, or Django
    backend based on the request path.  The match order matters: event/sockjs
    traffic is checked first, then webpack assets, then everything else
    falls through to Django.
    """

    def getChild(self, name, request):
        # Assume an HTTP 1.1 request
        proxy_host = request.requestHeaders.getRawHeaders('Host')
        # Preserve the original Host header so backends can build URLs.
        request.requestHeaders.setRawHeaders('X-Forwarded-Host', proxy_host)
        # Long-polling event endpoints and sockjs go to the Tornado server.
        if (request.uri in ['/json/get_events'] or
            request.uri.startswith('/json/events') or
            request.uri.startswith('/api/v1/events') or
            request.uri.startswith('/sockjs')):
            return proxy.ReverseProxyResource('localhost', tornado_port, '/'+name)
        # Static-asset and hot-reload traffic goes to the webpack dev server.
        elif (request.uri.startswith('/webpack') or
              request.uri.startswith('/socket.io')):
            return proxy.ReverseProxyResource('localhost', webpack_port, '/'+name)
        # Everything else is handled by Django's runserver.
        return proxy.ReverseProxyResource('localhost', django_port, '/'+name)
try:
reactor.listenTCP(proxy_port, server.Site(Resource()), interface=options.interface)
reactor.run()
except:
# Print the traceback before we get SIGTERM and die.
traceback.print_exc()
raise
finally:
# Kill everything in our process group.
os.killpg(0, signal.SIGTERM)
|
Python
| 0 |
@@ -15,16 +15,55 @@
python%0A
+from __future__ import print_function%0A%0A
import o
@@ -1349,16 +1349,158 @@
n on')%0A%0A
+parser.add_option('--no-clear-memcached',%0A action='store_false', dest='clear_memcached',%0A default=True, help='Do not clear memcached')%0A%0A
(options
@@ -2100,16 +2100,142 @@
repo')%0A%0A
+if options.clear_memcached:%0A print(%22Clearing memcached ...%22)%0A subprocess.check_call('./scripts/setup/flush-memcached')%0A%0A
# Set up
|
f737a8be41111f65944b00eb85a76687653fc8c0
|
Create sort_fpkm.py
|
sort_fpkm.py
|
sort_fpkm.py
|
Python
| 0 |
@@ -0,0 +1,766 @@
+import os%0Aimport fnmatch%0Aimport sys, csv ,operator%0A%0Afor root, dirnames, filenames in os.walk('/Users/idriver/RockLab-files/test'):%0A for filename in fnmatch.filter(filenames, '*.fpkm_tracking'):%0A if filename =='isoforms.fpkm_tracking':%0A data = csv.reader(open(os.path.join(root, filename), 'rU'),delimiter='%5Ct')%0A header = next(data, None) # returns the headers or %60None%60 if the input is empty%0A sortedlist = sorted(data, key=operator.itemgetter(0))%0A #now write the sorte result into new CSV file%0A with open(root+'/'+root.split('/')%5B-1%5D+'_isoforms.fpkm_tracking', %22wb%22) as f:%0A fileWriter = csv.writer(f, delimiter='%5Ct')%0A fileWriter.writerow(header)%0A for row in sortedlist:%0A fileWriter.writerow(row)%0A
|
|
d42aad6a15dfe9cc5a63dbb19efe112534b91a5e
|
Add autoexec script for reference (already bundled in config)
|
resources/autoexec.py
|
resources/autoexec.py
|
Python
| 0 |
@@ -0,0 +1,287 @@
+# place at ~/.kodi/userdata/autoexec.py%0Aimport xbmc%0Aimport time%0Axbmc.executebuiltin(%22XBMC.ReplaceWindow(1234)%22)%0Atime.sleep(0.1)%0Axbmc.executebuiltin('PlayMedia(%22/storage/videos/SSL%22,%22isdir%22)')%0Axbmc.executebuiltin('xbmc.PlayerControl(repeatall)')%0Axbmc.executebuiltin(%22Action(Fullscreen)%22)%0A
|
|
51d26aecab3eee55a811657fe65b18cb08581f46
|
Make logging.yml configurable
|
blues/elasticsearch.py
|
blues/elasticsearch.py
|
"""
Elasticsearch Blueprint
=======================
**Fabric environment:**
.. code-block:: yaml
blueprints:
- blues.elasticsearch
settings:
elasticsearch:
version: 1.5 # Version of elasticsearch to install (Required)
cluster_name: foobar # Name of the cluster (Default: elasticsearch)
# heap_size: 1g # Heap Size (defaults to 256m min, 1g max)
# number_of_shards: 1 # Number of shards/splits of an index (Default: 5)
# number_of_replicas: 0 # Number of replicas / additional copies of an index (Default: 0)
# network_bind_host: 127.0.0.1 # Set the bind address specifically, IPv4 or IPv6 (Default: 0.0.0.0)
# network_publish_host: 127.0.0.1 # Set the address other nodes will use to communicate with this node (Optional)
# network_host: 127.0.0.1 # Set both `network_bind_host` and `network_publish_host` (Optional)
# queue_size: 3000 # Set thread pool queue size (Default: 1000)
# plugins: # Optional list of plugins to install
# - mobz/elasticsearch-head
"""
from fabric.decorators import task
from fabric.utils import abort
from refabric.api import info
from refabric.context_managers import sudo
from refabric.contrib import blueprints
from . import debian
from refabric.operations import run
__all__ = ['start', 'stop', 'restart', 'reload', 'setup', 'configure',
'install_plugin']
blueprint = blueprints.get(__name__)
start = debian.service_task('elasticsearch', 'start')
stop = debian.service_task('elasticsearch', 'stop')
restart = debian.service_task('elasticsearch', 'restart')
reload = debian.service_task('elasticsearch', 'force-reload')
@task
def setup():
    """
    Install Elasticsearch

    Installs the apt package (plus its Java dependency and any configured
    plugins) and then uploads the configuration templates.
    """
    install()
    configure()
def install():
    """Add the packages.elasticsearch.org apt repo and install the service.

    Installs Java first, then the ``elasticsearch`` package for the
    blueprint-configured version, installs any configured plugins, and
    registers the service to start on boot.
    """
    with sudo():
        from blues import java
        java.install()

        version = blueprint.get('version', '1.0')
        info('Adding apt repository for {} version {}', 'elasticsearch', version)
        # Repository path embeds the major version, e.g. .../elasticsearch/1.5/debian
        repository = 'http://packages.elasticsearch.org/elasticsearch/{0}/debian stable main'.format(version)
        debian.add_apt_repository(repository)

        info('Adding apt key for', repository)
        debian.add_apt_key('http://packages.elasticsearch.org/GPG-KEY-elasticsearch')
        debian.apt_get('update')

        # Install elasticsearch (and java)
        info('Installing {} version {}', 'elasticsearch', version)
        debian.apt_get('install', 'elasticsearch')

        # Install plugins
        plugins = blueprint.get('plugins', [])
        for plugin in plugins:
            info('Installing elasticsearch "{}" plugin...', plugin)
            install_plugin(plugin)

        # Enable on boot
        debian.add_rc_service('elasticsearch', priorities='defaults 95 10')
@task
def configure():
    """
    Configure Elasticsearch

    Renders elasticsearch.yml and /etc/default/elasticsearch from the
    blueprint settings and restarts the service only if either changed.
    """
    # Template context for elasticsearch.yml; unset network settings render
    # as absent so elasticsearch falls back to its own defaults.
    context = {
        'cluster_name': blueprint.get('cluster_name', 'elasticsearch'),
        'number_of_shards': blueprint.get('number_of_shards', '5'),
        'number_of_replicas': blueprint.get('number_of_replicas', '0'),
        'bind_host': blueprint.get('network_bind_host'),
        'publish_host': blueprint.get('network_publish_host'),
        'host': blueprint.get('network_host'),
        'queue_size': blueprint.get('queue_size', 1000)
    }
    config = blueprint.upload('./elasticsearch.yml', '/etc/elasticsearch/', context)

    # JVM heap size goes into the init defaults file, not elasticsearch.yml.
    context = {
        'heap_size': blueprint.get('heap_size', '256m')
    }
    default = blueprint.upload('./default', '/etc/default/elasticsearch', context)

    # upload() returns truthy when the remote file changed.
    if config or default:
        restart()
@task
def install_plugin(name=None):
    """
    Install an elasticsearch plugin.

    :param name: Plugin name as understood by the bundled ``plugin`` tool,
                 e.g. ``mobz/elasticsearch-head``.  Aborts when omitted.
    """
    if not name:
        abort('No plugin name given')
    with sudo():
        # NOTE(review): name is interpolated into a remote command; assumed
        # to come from trusted fabric settings, not untrusted input.
        run('/usr/share/elasticsearch/bin/plugin -install {}'.format(name))
|
Python
| 0.000001 |
@@ -3432,24 +3432,25 @@
size', 1000)
+,
%0A %7D%0A c
@@ -3526,24 +3526,184 @@
, context)%0A%0A
+ context = %7B%0A 'log_level': blueprint.get('log_level', 'WARN'),%0A %7D%0A logging = blueprint.upload('./logging.yml', '/etc/elasticsearch/', context)%0A%0A
context
@@ -3869,16 +3869,27 @@
nfig or
+logging or
default:
|
5b89eb48a76a792cdd6e5cf8f0438f810a5e436a
|
Make isolated volume tests have unique tenant
|
tempest/tests/volume/base.py
|
tempest/tests/volume/base.py
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import time
import nose
import testtools
from tempest import clients
from tempest.common.utils.data_utils import rand_name
from tempest import config
from tempest import exceptions
LOG = logging.getLogger(__name__)
class BaseVolumeTest(testtools.TestCase):

    """Base test case class for all Cinder API tests."""

    @classmethod
    def setUpClass(cls):
        """Build the API clients; skip the class when Cinder is unavailable."""
        cls.config = config.TempestConfig()
        cls.isolated_creds = []

        if cls.config.compute.allow_tenant_isolation:
            creds = cls._get_isolated_creds()
            username, tenant_name, password = creds
            # 'manager' rather than 'os': the old local name shadowed the
            # os module imported at the top of the file.
            manager = clients.Manager(username=username,
                                      password=password,
                                      tenant_name=tenant_name,
                                      interface=cls._interface)
        else:
            manager = clients.Manager(interface=cls._interface)

        cls.os = manager
        cls.volumes_client = manager.volumes_client
        cls.servers_client = manager.servers_client
        cls.image_ref = cls.config.compute.image_ref
        cls.flavor_ref = cls.config.compute.flavor_ref
        cls.build_interval = cls.config.volume.build_interval
        cls.build_timeout = cls.config.volume.build_timeout
        # Volumes created by tests; create_volume() appends to it, so this
        # must be a list (it was a dict, which has no append()).
        cls.volumes = []

        skip_msg = ("%s skipped as Cinder endpoint is not available" %
                    cls.__name__)
        try:
            cls.volumes_client.keystone_auth(cls.os.username,
                                             cls.os.password,
                                             cls.os.auth_url,
                                             cls.volumes_client.service,
                                             cls.os.tenant_name)
        except exceptions.EndpointNotFound:
            # Don't leak the isolated tenant/user when we are skipping.
            cls.clear_isolated_creds()
            raise cls.skipException(skip_msg)

    @classmethod
    def _get_identity_admin_client(cls):
        """
        Returns an instance of the Identity Admin API client
        """
        os = clients.ComputeAdminManager()
        return os.identity_client

    @classmethod
    def _get_isolated_creds(cls):
        """
        Creates a new set of user/tenant/password credentials for a
        **regular** user of the Volume API so that a test case can
        operate in an isolated tenant container.
        """
        admin_client = cls._get_identity_admin_client()
        # Randomize the name root so each run gets a unique tenant and does
        # not collide with leftovers from earlier (failed) runs.
        rand_name_root = rand_name(cls.__name__)
        if cls.isolated_creds:
            # Main user already created. Create the alt one...
            rand_name_root += '-alt'
        username = rand_name_root + "-user"
        email = rand_name_root + "@example.com"
        tenant_name = rand_name_root + "-tenant"
        tenant_desc = tenant_name + "-desc"
        password = "pass"

        resp, tenant = admin_client.create_tenant(name=tenant_name,
                                                  description=tenant_desc)
        resp, user = admin_client.create_user(username,
                                              password,
                                              tenant['id'],
                                              email)
        # Store the complete creds (including UUID ids...) for later
        # but return just the username, tenant_name, password tuple
        # that the various clients will use.
        cls.isolated_creds.append((user, tenant))
        return username, tenant_name, password

    @classmethod
    def clear_isolated_creds(cls):
        """Delete any user/tenant pairs created by _get_isolated_creds()."""
        if not cls.isolated_creds:
            return  # nothing to clean up (was a no-op ``pass``)
        admin_client = cls._get_identity_admin_client()
        for user, tenant in cls.isolated_creds:
            admin_client.delete_user(user['id'])
            admin_client.delete_tenant(tenant['id'])

    @classmethod
    def tearDownClass(cls):
        cls.clear_isolated_creds()

    def create_volume(self, size=1, metadata=None):
        """Wrapper utility that returns a test volume.

        :param size: Volume size in GB.
        :param metadata: Optional metadata dict (a None default avoids the
                         shared-mutable-default pitfall of ``metadata={}``).
        """
        display_name = rand_name(self.__class__.__name__ + "-volume")
        # Keyword was misspelled 'metdata', so metadata was silently dropped
        # (or rejected, depending on the client signature).
        cli_resp = self.volumes_client.create_volume(size=size,
                                                     display_name=display_name,
                                                     metadata=metadata or {})
        resp, volume = cli_resp
        self.volumes_client.wait_for_volume_status(volume['id'], 'available')
        self.volumes.append(volume)
        return volume

    def wait_for(self, condition):
        """Repeatedly calls condition() until a timeout."""
        start_time = int(time.time())
        while True:
            try:
                condition()
            except Exception:
                pass
            else:
                return
            if int(time.time()) - start_time >= self.build_timeout:
                # Final call outside the try/except so the failure propagates.
                condition()
                return
            time.sleep(self.build_interval)
class BaseVolumeTestJSON(BaseVolumeTest):
    """Runs the volume tests against the JSON API interface."""

    @classmethod
    def setUpClass(cls):
        # Must set the interface before the base class builds its clients.
        cls._interface = "json"
        super(BaseVolumeTestJSON, cls).setUpClass()
class BaseVolumeTestXML(BaseVolumeTest):
    """Runs the volume tests against the XML API interface."""

    @classmethod
    def setUpClass(cls):
        # Must set the interface before the base class builds its clients.
        cls._interface = "xml"
        super(BaseVolumeTestXML, cls).setUpClass()
|
Python
| 0.998887 |
@@ -3069,24 +3069,34 @@
name_root =
+rand_name(
cls.__name__
@@ -3095,16 +3095,17 @@
__name__
+)
%0A
|
159ed7dd9dd5ade6c4310d2aa106b13bf94aa903
|
Add empty cloner
|
stoneridge_cloner.py
|
stoneridge_cloner.py
|
Python
| 0.000007 |
@@ -0,0 +1,428 @@
+#!/usr/bin/env python%0A# This Source Code Form is subject to the terms of the Mozilla Public License,%0A# v. 2.0. If a copy of the MPL was not distributed with this file, You can%0A# obtain one at http://mozilla.org/MPL/2.0/.%0A%0A# TODO - This will run on the central server, and download releases from ftp.m.o%0A# to a local directory for serving up to the clients, which will download the%0A# necessary stuff via stoneridge_downloader.py%0A
|
|
9c3682ec717fd4de5555874ad3665c6f7be479b8
|
improve ecom
|
netforce_sale/netforce_sale/models/payment_method.py
|
netforce_sale/netforce_sale/models/payment_method.py
|
from netforce.model import Model,fields,get_model
from netforce import database
from netforce.logger import audit_log
class PaymentMethod(Model):
    """Payment-method hooks for the sale module.

    Routes gateway callbacks (received / pending / error) to the matching
    sales order found via its transaction number, and returns the URL the
    customer should be redirected to.  The lookup and redirect logic was
    duplicated verbatim in all three callbacks; it now lives in two private
    helpers.
    """
    _inherit = "payment.method"

    def _find_sale(self, transaction_no):
        """Return the sale.order id for transaction_no, or None if absent."""
        res = get_model("sale.order").search(
            [["transaction_no", "=", transaction_no]])
        return res[0] if res else None

    def _sale_next_url(self, sale_id):
        """Build the post-payment redirect URL, honoring ecom settings."""
        settings = get_model("ecom2.settings").browse(1)  # XXX: change this
        if settings.ecom_return_url:
            return settings.ecom_return_url + str(sale_id)
        return "/ui#name=sale&mode=form&active_id=%d" % sale_id

    # NOTE(review): ``context={}`` mutable defaults kept for signature
    # compatibility with the framework; the dicts are never mutated here.
    def payment_received(self, context={}):
        res = super().payment_received(context=context)
        if res:
            return res
        transaction_no = context.get("transaction_no")
        audit_log("Payment received: transaction_no=%s" % transaction_no)
        amount = context.get("amount")
        currency_id = context.get("currency_id")
        pay_type = context.get("type")
        sale_id = self._find_sale(transaction_no)
        if sale_id is None:
            print("Sales order not found for transaction_no=%s" % transaction_no)
            return
        print("Found sales order %d for transaction_no=%s" % (sale_id, transaction_no))
        sale = get_model("sale.order").browse(sale_id)
        if not sale.is_paid:
            # Sanity-check the gateway callback against the order before
            # marking it paid.
            if currency_id and currency_id != sale.currency_id.id:
                raise Exception("Received sales order payment in wrong currency (pmt: %s, sale: %s)" % (currency_id, sale.currency_id.id))
            method = sale.pay_method_id
            if not method:
                raise Exception("Missing sales order payment method")
            if method.type != pay_type:
                raise Exception("Received sales order payment with wrong method (pmt: %s, sale: %s)" % (pay_type, method.type))
            audit_log("Creating payment for sales order %s: transaction_no=%s" % (sale.number, transaction_no))
            sale.payment_received()
        return {
            "next_url": self._sale_next_url(sale_id),
        }

    def payment_pending(self, context={}):
        res = super().payment_pending(context=context)
        if res:
            return res
        sale_id = self._find_sale(context.get("transaction_no"))
        if sale_id is None:
            return
        return {
            "next_url": self._sale_next_url(sale_id),
        }

    def payment_error(self, context={}):
        res = super().payment_error(context=context)
        if res:
            return res
        sale_id = self._find_sale(context.get("transaction_no"))
        if sale_id is None:
            return
        return {
            "next_url": self._sale_next_url(sale_id),
        }

PaymentMethod.register()
|
Python
| 0.000035 |
@@ -436,92 +436,8 @@
no)%0A
- amount=context.get(%22amount%22)%0A currency_id=context.get(%22currency_id%22)%0A
@@ -469,16 +469,16 @@
%22type%22)%0A
+
@@ -866,209 +866,8 @@
id:%0A
- if currency_id and currency_id!=sale.currency_id.id:%0A raise Exception(%22Received sales order payment in wrong currency (pmt: %25s, sale: %25s)%22%25(currency_id,sale.currency_id.id))%0A
@@ -1261,32 +1261,32 @@
ransaction_no))%0A
-
sale
@@ -1303,16 +1303,31 @@
eceived(
+context=context
)%0A
|
3f6a08d92f46c606e99c14eb12849e1386704cf3
|
Bump version to 0.6
|
oscar/__init__.py
|
oscar/__init__.py
|
import os
# Use 'final' as the 4th element to indicate
# a full release
VERSION = (0, 6, 0, 'beta', 1)


def get_short_version():
    """Return the major.minor version string, e.g. '0.6'."""
    major, minor = VERSION[0], VERSION[1]
    return '%s.%s' % (major, minor)


def get_version():
    """Return the full human-readable version string.

    Examples: '0.6', '0.6.1', '0.6 beta 1', '0.6 pre-alpha'.
    """
    major, minor, patch = VERSION[:3]
    version = '%s.%s' % (major, minor)
    if patch:
        # Append 3rd digit if > 0
        version = '%s.%s' % (version, patch)
    if VERSION[3:] == ('alpha', 0):
        version = '%s pre-alpha' % version
    elif VERSION[3] != 'final':
        version = '%s %s %s' % (version, VERSION[3], VERSION[4])
    return version
# Cheeky setting that allows each template to be accessible by two paths.
# Eg: the template 'oscar/templates/oscar/base.html' can be accessed via both
# 'base.html' and 'oscar/base.html'. This allows Oscar's templates to be
# extended by templates with the same filename
OSCAR_MAIN_TEMPLATE_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), 'templates/oscar')
OSCAR_CORE_APPS = [
    'oscar',
    'oscar.apps.analytics',
    'oscar.apps.checkout',
    'oscar.apps.address',
    'oscar.apps.shipping',
    'oscar.apps.catalogue',
    'oscar.apps.catalogue.reviews',
    'oscar.apps.partner',
    'oscar.apps.basket',
    'oscar.apps.payment',
    'oscar.apps.offer',
    'oscar.apps.order',
    'oscar.apps.customer',
    'oscar.apps.promotions',
    'oscar.apps.search',
    'oscar.apps.voucher',
    'oscar.apps.wishlists',
    'oscar.apps.dashboard',
    'oscar.apps.dashboard.reports',
    'oscar.apps.dashboard.users',
    'oscar.apps.dashboard.orders',
    'oscar.apps.dashboard.promotions',
    'oscar.apps.dashboard.catalogue',
    'oscar.apps.dashboard.offers',
    'oscar.apps.dashboard.partners',
    'oscar.apps.dashboard.pages',
    'oscar.apps.dashboard.ranges',
    'oscar.apps.dashboard.reviews',
    'oscar.apps.dashboard.vouchers',
    'oscar.apps.dashboard.communications',
    # 3rd-party apps that oscar depends on
    'haystack',
    'treebeard',
    'sorl.thumbnail',
]


def get_core_apps(overrides=None):
    """
    Return a list of oscar's apps amended with any passed overrides.

    An override replaces the core app whose short label it ends with;
    dashboard overrides only ever replace dashboard apps.
    """
    if not overrides:
        return OSCAR_CORE_APPS

    def resolve(app_label):
        # Strip the common prefix so overrides match on the short label.
        pattern = app_label.replace('oscar.apps.', '')
        for candidate in overrides:
            if not candidate.endswith(pattern):
                continue
            # Don't let a dashboard override hijack a non-dashboard app.
            if 'dashboard' in candidate and 'dashboard' not in pattern:
                continue
            return candidate
        return app_label

    return [resolve(app_label) for app_label in OSCAR_CORE_APPS]
|
Python
| 0 |
@@ -92,16 +92,17 @@
0, '
-beta', 1
+final', 0
)%0A%0A%0A
|
d9710fa2af26ab4ab5fef62adc5be670437bea68
|
Create logistics_regression.py
|
logistics_regression.py
|
logistics_regression.py
|
Python
| 0.000024 |
@@ -0,0 +1,2456 @@
+#!/usr/bin/python%0A# -*-coding:utf-8 -*-%0A%0Afrom math import exp%0Aimport random%0Aimport data_tool%0A%0A#y = x1*a1 + x2*a2 + x3*a3 + ... + xn*an + b%0Adef predict(data,%0A coef,%0A bias):%0A pred = 0.0%0A for index in range(len(coef)):%0A pred += (data%5Bindex%5D * coef%5Bindex%5D + bias)%0A return sigmoid(pred)%0A%0Adef sigmoid(x):%0A res = 0.0%0A try :%0A if x %3E 60:%0A res = 1.0 / (1.0 + exp(-60))%0A elif x %3C -60:%0A res = 1.0 / (1.0 + exp(60))%0A else:%0A res = 1.0 / (1.0 + exp(-x))%0A except:%0A print 'over math.exp range ', x%0A return res%0A%0Adef sgd(train,%0A labels,%0A coef,%0A bias,%0A learn_rate,%0A nepoch):%0A for epoch in range(nepoch):%0A sum_error = 0.0%0A for index in range(len(train)):%0A pred = predict(train%5Bindex%5D, coef, bias)%0A sum_error += (labels%5Bindex%5D - pred)%0A bias = (bias + learn_rate * sum_error * pred * (1 - pred))%0A for i in range(len(coef)):%0A coef%5Bi%5D = (coef%5Bi%5D + learn_rate * sum_error * pred * (1 - pred) * train%5Bindex%5D%5Bi%5D)%0A return coef, bias%0A%0A#generate standard normal distribution%0Adef param_gauss(size):%0A param = %5B%5D%0A for i in range(size):%0A param.append(random.gauss(mu=0, sigma=0.05))%0A return param%0A%0Adef logistic_regression(features_train, labels_train,%0A features_test, labels_test,%0A learn_rate, nepoch):%0A coef = param_gauss(len(features_train%5B0%5D))%0A bias = param_gauss(1)%5B0%5D%0A coef, bias = sgd(features_train, labels_train, coef, bias, learn_rate, nepoch)%0A pred = %5B%5D%0A for index in range(len(features_test)):%0A pred.append(predict(features_test%5Bindex%5D, coef, bias=bias))%0A return pred, coef, bias%0A%0Adef accuracy(pred, y_true):%0A correct = 0.0%0A for index in range(len(pred)):%0A if pred%5Bindex%5D == y_true%5Bindex%5D:%0A correct += 1.0%0A return correct / len(pred)%0A%0A%0A%0A#test%0Afeatures_train, labels_train, features_test, labels_test = data_tool.train_test_split(%0A data_tool.load_data(),%0A test_rate=0.3)%0A%0Afor i in range(5):%0A print 
'cycle +++++++++++++++++++++++++++++++++++++++++++++++++++++ ', i%0A pred, coef, bias = logistic_regression(features_train, labels_train, features_test, labels_test,%0A learn_rate=0.02, nepoch=100)%0A score = accuracy(pred, labels_test)%0A print 'coef is: ', coef%0A print 'bias is: ', bias%0A print 'accuracy is: ', score%0A
|
|
5211117033f596bd506e81e8825ddfb08634c25e
|
Create battery.py
|
client/iOS/battery.py
|
client/iOS/battery.py
|
Python
| 0.000028 |
@@ -0,0 +1,771 @@
+# coding: utf-8%0A%0Aimport collections, objc_util%0A%0Abattery_info = collections.namedtuple('battery_info', 'level state')%0A%0Adef get_battery_info():%0A device = objc_util.ObjCClass('UIDevice').currentDevice()%0A device.setBatteryMonitoringEnabled_(True)%0A try:%0A return battery_info(int(device.batteryLevel() * 100),%0A 'unknown unplugged charging full'.split()%5Bdevice.batteryState()%5D)%0A finally:%0A device.setBatteryMonitoringEnabled_(False)%0A%0Adef battery_is_low(threshold = 20):%0A battery_info = get_battery_info()%0A return (battery_info.level %3C= threshold%0A and battery_info.state.startswith('un'))%0A%0A__all__ = 'get_battery_info battery_is_low'.split()%0A%0Aif __name__ == '__main__':%0A print(get_battery_info())%0A print(battery_is_low(15))%0A
|
|
840d4d555b7b2858ca593251f1593943b10b135b
|
Add setup_egg.py
|
setup_egg.py
|
setup_egg.py
|
Python
| 0.000001 |
@@ -0,0 +1,626 @@
+#!/usr/bin/env python%0A%22%22%22Wrapper to run setup.py using setuptools.%22%22%22%0A%0Afrom setuptools import setup %0A%0A################################################################################%0A# Call the setup.py script, injecting the setuptools-specific arguments.%0A%0Aextra_setuptools_args = dict(%0A tests_require=%5B'nose'%5D,%0A test_suite='nose.collector',%0A zip_safe=False,%0A )%0A%0A%0Aif __name__ == '__main__':%0A execfile('setup.py', dict(__name__='__main__', %0A extra_setuptools_args=extra_setuptools_args))%0A%0A%0A%0A
|
|
71a6c671f802e3b1c123b083ef34f81efeb55750
|
Create MakeMaskfiles.py
|
MakeMaskfiles.py
|
MakeMaskfiles.py
|
Python
| 0.000001 |
@@ -0,0 +1,1909 @@
+import gzip%0Aimport sys%0Afrom collections import defaultdict%0A%0A%0Adef readFasta(infile):%0A%09sequence = ''%0A%09if '.gz' in infile:%0A%09%09with gzip.open(infile) as data:%0A%09%09%09for line in data:%0A%09%09%09%09if '%3E' in line:%0A%09%09%09%09%09seqname = line.strip().replace('%3E','')%0A%09%09%09%09else:%0A%09%09%09%09%09sequence += line.strip().replace(' ','')%0A%0A%09else:%0A%09%09with open(infile) as data:%0A%09%09%09for line in data:%0A%09%09%09%09if '%3E' in line:%0A%09%09%09%09%09seqname = line.strip().replace('%3E','')%0A%09%09%09%09else:%0A%09%09%09%09%09sequence += line.strip().replace(' ','')%0A%0A%09return sequence%0A%0A%0A_, repeatmask_file, callable_mask_file, window_size, chrom, outprefix = sys.argv%0Awindow_size = int(window_size)%0A%0A#repeatmask_file = %22helperfiles/RepeatMasks/chr%7B%7D.fa.masked%22%0A#callable_mask_file = %22helperfiles/AccessibilityMasks/20140520.chr%7B%7D.strict_mask.fasta.gz%22%0A%0A%0Abases_called = 0%0A%0A# Mask file for repetitative regions%0Arepeatmask = readFasta(repeatmask_file)%0Acallable_mask = readFasta(callable_mask_file)%0A%0Awith open(outprefix + '.bed','w') as outbed, open (outprefix + '.txt','w') as out:%0A%09d = defaultdict(int)%0A%0A%09prev_base = 'Notcalled'%0A%09start = 0%0A%0A%09for i in range(len(callable_mask)): %0A%0A%09%09repeat_base = repeatmask%5Bi%5D%0A%09%09callable_base = callable_mask%5Bi%5D%0A%0A%0A%09%09# Round down to nearest window start%0A%09%09window = i - i%25window_size%0A%09%09d%5Bwindow%5D += 0%0A%09%09%09%0A%09%09if repeat_base != 'N' and callable_base == 'P':%0A%0A%09%09%09current_base = 'Called'%0A%09%09%09d%5Bwindow%5D += 1%0A%09%09else:%0A%09%09%09current_base = 'Notcalled'%0A%0A%0A%09%09# extend%0A%09%09if current_base == prev_base:%0A%09%09%09end = i%0A%0A%09%09# Make a new one%0A%09%09if current_base != prev_base:%0A%09%09%09if prev_base == 'Called':%0A%09%09%09%09outbed.write('%7B%7D%5Ct%7B%7D%5Ct%7B%7D%5Ct%7B%7D%5Cn'.format(chrom, start, end, prev_base)) 
%0A%0A%09%09%09start = i %0A%09%09%09end = i %0A%0A%09%09prev_base = current_base%0A%0A%09if prev_base == 'Called':%0A%09%09outbed.write('%7B%7D%5Ct%7B%7D%5Ct%7B%7D%5Ct%7B%7D%5Cn'.format(chrom, start, end, prev_base)) %0A%0A%0A%09# Write output files%0A%09for window in range(0, max(d)+window_size, window_size):%0A%09%09out.write('%7B%7D%5Ct%7B%7D%5Ct%7B%7D%5Cn'.format(chrom, window, d%5Bwindow%5D / float(window_size)))%0A%0A%0A
|
|
675de92e16e268badd8c6f5de992c3901cc8f2ce
|
Update Category Model
|
apps/shop/migrations/0004_category_parent_category.py
|
apps/shop/migrations/0004_category_parent_category.py
|
Python
| 0 |
@@ -0,0 +1,564 @@
+# -*- coding: utf-8 -*-%0A# Generated by Django 1.10.5 on 2017-02-11 19:34%0Afrom __future__ import unicode_literals%0A%0Afrom django.db import migrations, models%0Aimport django.db.models.deletion%0A%0A%0Aclass Migration(migrations.Migration):%0A%0A dependencies = %5B%0A ('shop', '0003_product_model_name'),%0A %5D%0A%0A operations = %5B%0A migrations.AddField(%0A model_name='category',%0A name='parent_category',%0A field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='shop.Category'),%0A ),%0A %5D%0A
|
|
f2d1421555f00f7bcb77f43cd010c221045c6bfd
|
Add tests for nd-shifty
|
tests/console/test_shifty.py
|
tests/console/test_shifty.py
|
Python
| 0 |
@@ -0,0 +1,1389 @@
+import click.testing%0A%0Afrom netdumplings.console.shifty import shifty%0Afrom netdumplings.exceptions import NetDumplingsError%0A%0A%0Aclass TestShifty:%0A %22%22%22%0A Test the nd-shifty commandline tool.%0A %22%22%22%0A def test_shifty(self, mocker):%0A %22%22%22%0A Test that the DumplingHub is instantiated as expected and that run()%0A is called.%0A %22%22%22%0A mock_hub = mocker.patch('netdumplings.DumplingHub')%0A%0A runner = click.testing.CliRunner()%0A result = runner.invoke(%0A shifty,%0A %5B%0A '--address', 'testhost',%0A '--in-port', 1001,%0A '--out-port', 1002,%0A '--status-freq', 99,%0A %5D,%0A )%0A%0A mock_hub.assert_called_once_with(%0A address='testhost',%0A in_port=1001,%0A out_port=1002,%0A status_freq=99,%0A )%0A%0A mock_hub.return_value.run.assert_called_once()%0A assert result.exit_code == 0%0A%0A def test_shifty_with_error(self, mocker):%0A %22%22%22%0A Test that a NetDumplingsError in DumplingHub.run() results in shifty%0A exiting with status code 1.%0A %22%22%22%0A mock_hub = mocker.patch('netdumplings.DumplingHub')%0A mock_hub.return_value.run.side_effect = NetDumplingsError%0A%0A runner = click.testing.CliRunner()%0A result = runner.invoke(shifty)%0A%0A assert result.exit_code == 1%0A
|
|
dd983ae232829559766bcdf4d2ea58861b8a47ad
|
Bring your own daemon.
|
varnish_statd.py
|
varnish_statd.py
|
Python
| 0 |
@@ -0,0 +1,902 @@
+#!/usr/bin/env python%0A%0Aimport time%0Aimport os%0Afrom pprint import pprint%0A%0Aimport varnishapi%0A%0A%0Adef stat(name=None):%0A if name is None:%0A vsc = varnishapi.VarnishStat()%0A else:%0A vsc = varnishapi.VarnishStat(opt=%5B%22-n%22, name%5D)%0A r = vsc.getStats()%0A values = dict(((k, v%5B'val'%5D) for k, v in r.iteritems()))%0A vsc.Fini()%0A return values%0A%0Anames = os.getenv('VARNISH_STATD_NAMES')%0Aif names:%0A names = names.split(',')%0Aelse:%0A names = (None,)%0A%0Await = int(os.getenv('VARNISH_STATD_WAIT', 60))%0Acarbon = os.getenv('CARBON_HOST', '127.0.0.1')%0Astats = os.getenv(%22VARNISH_STATD_STATS%22, %22hitmisspass%22).split(',')%0A%0Awhile True:%0A for n in names:%0A s = stat(n)%0A if 'hitmisspass' in stats:%0A for k in %5B'cache_hit', 'cache_hitpass', 'cache_miss'%5D:%0A v = s%5B'MAIN.%25s' %25 k%5D%0A print(%22%25s: %25s%22 %25 (k, v))%0A #pprint(s)%0A time.sleep(wait)%0A
|
|
c1d4525d5f43a5c2bfbfd88ab0dd943eb2452574
|
add 127
|
vol3/127.py
|
vol3/127.py
|
Python
| 0.999999 |
@@ -0,0 +1,700 @@
+from fractions import gcd%0A%0Aif __name__ == %22__main__%22:%0A LIMIT = 120000%0A rad = %5B1%5D * LIMIT%0A for i in range(2, LIMIT):%0A if rad%5Bi%5D == 1:%0A for j in range(i, LIMIT, i):%0A rad%5Bj%5D *= i%0A ele = %5B%5D%0A for i in range(1, LIMIT):%0A ele.append(%5Brad%5Bi%5D, i%5D)%0A ele = sorted(ele)%0A%0A ans = 0%0A for c in range(3, LIMIT):%0A chalf = c / 2%0A for %5Bra, a%5D in ele:%0A if ra * rad%5Bc%5D %3E chalf:%0A break%0A b = c - a%0A if a %3E= b:%0A continue%0A if ra * rad%5Bb%5D * rad%5Bc%5D %3E= c:%0A continue%0A if gcd(ra, rad%5Bb%5D) != 1:%0A continue%0A ans += c%0A print ans%0A
|
|
8e7b57c8bc7be6a061d0c841700291a7d85df989
|
add 174
|
vol4/174.py
|
vol4/174.py
|
Python
| 0.999959 |
@@ -0,0 +1,366 @@
+if __name__ == %22__main__%22:%0A L = 10 ** 6%0A count = %5B0%5D * (L + 1)%0A for inner in range(1, L / 4 + 1):%0A outer = inner + 2%0A used = outer * outer - inner * inner%0A while used %3C= L:%0A count%5Bused%5D += 1%0A outer += 2%0A used = outer * outer - inner * inner%0A print sum(map(lambda x: 1 if 1 %3C= x %3C= 10 else 0, count))%0A
|
|
a5b4fa261750fa79d61fc16b6061d449aa7e3523
|
Add missing block.py
|
rasterio/block.py
|
rasterio/block.py
|
Python
| 0.000088 |
@@ -0,0 +1,128 @@
+%22%22%22Raster Blocks%22%22%22%0A%0Afrom collections import namedtuple%0A%0A%0ABlockInfo = namedtuple('BlockInfo', %5B'row', 'col', 'window', 'size'%5D)%0A
|
|
65843b537e45b98068566c6cc57e4a3ad139d607
|
add variant.py
|
cendr/views/api/variant.py
|
cendr/views/api/variant.py
|
Python
| 0.000001 |
@@ -0,0 +1,1184 @@
+# NEW API%0A%0Afrom cendr import api, cache, app%0Afrom cyvcf2 import VCF%0Afrom flask import jsonify%0Aimport re%0Aimport sys%0Afrom subprocess import Popen, PIPE%0A%0A%0Adef get_region(region):%0A m = re.match(%22%5E(%5B0-9A-Za-z%5D+):(%5B0-9%5D+)-(%5B0-9%5D+)$%22, region)%0A if not m:%0A return msg(None, %22Invalid region%22, 400)%0A%0A chrom = m.group(1)%0A start = int(m.group(2))%0A end = int(m.group(3))%0A return chrom, start, end%0A%0A%[email protected]('/api/variant/%3Cregion%3E')%0Adef variant_from_region(region):%0A vcf = %22http://storage.googleapis.com/elegansvariation.org/releases/%7Bversion%7D/WI.%7Bversion%7D.vcf.gz%22.format(version = 20170312)%0A m = re.match(%22%5E(%5B0-9A-Za-z%5D+):(%5B0-9%5D+)-(%5B0-9%5D+)$%22, region)%0A if not m:%0A return %22Error - malformed region.%22, 400%0A start = int(m.group(2))%0A end = int(m.group(3))%0A%0A if start %3E= end:%0A return %22Invalid start and end region values%22, 400%0A if end - start %3E 1e5:%0A return %22Maximum region size is 100 kb%22, 400%0A%0A comm = %5B%22bcftools%22, %22view%22, vcf, region%5D%0A out, err = Popen(comm, stdout=PIPE, stderr=PIPE).communicate()%0A #if err:%0A # return err, 400%0A #v = VCF(out)%0A return jsonify(%7B%22out%22: out.splitlines(), %22comm%22: ' '.join(comm)%7D)%0A
|
|
d2bcba204d36a8ffd1e6a1ed79b89fcb6f1c88c5
|
Add file to test out kmc approach. Dump training k-mers to fasta file
|
ideas/test_kmc.py
|
ideas/test_kmc.py
|
Python
| 0 |
@@ -0,0 +1,1362 @@
+# This code will test out the idea of using kmc to%0A# 1. quickly enumerate the k-mers%0A# 2. intersect these with the training database, output as fasta%0A# 3. use that reduced fasta of intersecting kmers as the query to CMash%0A%0A####################################################################%0A# First, I will need to dump the training database to a fasta file%0Afrom CMash import MinHash as MH%0Aimport os%0Aimport blist%0A%0Atraining_out_file = '/nfs1/Koslicki_Lab/koslickd/KMC_test/NathanRefSeqTraining60mers.fa'%0Atraining_data ='/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/micopdb_n_1000_k_60.h5'%0Atraining_file_names = %22/nfs1/Koslicki_Lab/koslickd/MiCOPCMash/TrainingData/NathanRefSeq/absolute_file_names.txt%22%0A%0Afile_names = %5B%5D%0Awith open(training_file_names, 'r') as fid:%0A%09iter = 0%0A%09for line in fid.readlines():%0A%09%09line = line.strip()%0A%09%09file_names.append(os.path.basename(line))%0A%09%09iter += 1%0A%09%09if iter %3E 1000:%0A%09%09%09break%0A%0Aall_kmers = blist.blist()%0Afor file_name in file_names:%0A%09sketch = MH.import_multiple_from_single_hdf5(training_data, import_list=%5Bfile_name%5D)%5B0%5D%0A%09all_kmers += sketch._kmers%0A%0Aall_kmers_set = set(all_kmers)%0A%0Awith open(training_out_file, 'w') as fid:%0A%09iter = 0%0A%09for kmer in all_kmers_set:%0A%09%09fid.write(%22%3Eseq_%25d%5Cn%22 %25 iter)%0A%09%09fid.write(%22%25s%5Cn%22 %25 kmer)%0A%09%09iter += 1%0A%0A##########################################################################%0A%0A
|
|
c584bca2f9ac7bc005128d22b4e81a6b4885724c
|
allow Fabric to infrastructure config from YAML data files
|
templates/fabfile.py
|
templates/fabfile.py
|
Python
| 0 |
@@ -0,0 +1,484 @@
+import yaml%0Afrom fabric.api import env, run%0A%0Adef import_inf(data='web_app_basic.yml'):%0A inf_data = open(data, 'r')%0A inf = yaml.load(inf_data)%0A# for box in inf:%0A# print '%5Cn'%0A# for parameter in box:%0A# print parameter, ':', box%5Bparameter%5D%0A return inf%0A inf_data.close()%0A%0Ainf = import_inf()%0Aenv.hosts = %5Binf%5B1%5D%5B'ip'%5D%5D%0Aenv.user = 'vagrant'%0Aenv.password = 'vagrant'%0A%0Adef hostinf():%0A run('hostname')%0A run('ip a sh dev eth1')%0A run('uname -a')%0A%0A
|
|
f657a02a560af1a5860f9a532052f54330018620
|
Build "shell" target with chromium_code set.
|
ui/shell/shell.gyp
|
ui/shell/shell.gyp
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'shell',
'type': 'static_library',
'dependencies': [
'../aura/aura.gyp:aura',
'../views/views.gyp:views',
'../../skia/skia.gyp:skia',
],
'sources': [
'minimal_shell.cc',
'minimal_shell.h',
],
},
],
}
|
Python
| 0.999994 |
@@ -158,16 +158,62 @@
ile.%0A%0A%7B%0A
+ 'variables': %7B%0A 'chromium_code': 1,%0A %7D,%0A
'targe
@@ -307,24 +307,60 @@
dencies': %5B%0A
+ '../../skia/skia.gyp:skia',%0A
'../
@@ -420,44 +420,8 @@
s',%0A
- '../../skia/skia.gyp:skia',%0A
|
bddfeeec193d9fb61d99c70be68093c854e541f7
|
Add initial check thorium state
|
salt/thorium/check.py
|
salt/thorium/check.py
|
Python
| 0 |
@@ -0,0 +1,1760 @@
+'''%0AThe check Thorium state is used to create gateways to commands, the checks%0Amake it easy to make states that watch registers for changes and then just%0Asucceed or fail based on the state of the register, this creates the pattern%0Aof having a command execution get gated by a check state via a requisite.%0A'''%0A%0A%0Adef gt(name, value):%0A '''%0A Only succeed if the value in the given register location is greater than%0A the given value%0A '''%0A ret = %7B'name': name,%0A 'result': False,%0A 'comment': '',%0A 'changes': %7B%7D%7D%0A if name not in __reg__:%0A ret%5B'result'%5D = None%0A ret%5B'comment'%5D = 'Value %7B0%7D not in register'.format(name)%0A return ret%0A if __reg__%5Bname%5D%5B'val'%5D %3E value:%0A ret%5B'result'%5D = True%0A return ret%0A%0A%0Adef lt(name, value):%0A '''%0A Only succeed if the value in the given register location is greater than%0A the given value%0A '''%0A ret = %7B'name': name,%0A 'result': False,%0A 'comment': '',%0A 'changes': %7B%7D%7D%0A if name not in __reg__:%0A ret%5B'result'%5D = None%0A ret%5B'comment'%5D = 'Value %7B0%7D not in register'.format(name)%0A return ret%0A if __reg__%5Bname%5D%5B'val'%5D %3C value:%0A ret%5B'result'%5D = True%0A return ret%0A%0A%0Adef contains(name, value):%0A '''%0A Only succeed if the value in the given register location is greater than%0A the given value%0A '''%0A ret = %7B'name': name,%0A 'result': False,%0A 'comment': '',%0A 'changes': %7B%7D%7D%0A if name not in __reg__:%0A ret%5B'result'%5D = None%0A ret%5B'comment'%5D = 'Value %7B0%7D not in register'.format(name)%0A return ret%0A try:%0A if __reg__%5Bname%5D%5B'val'%5D in value:%0A ret%5B'result'%5D = True%0A except TypeError:%0A pass%0A return ret%0A
|
|
4a8ab668df01b7aaf402610df98067b046c78aa9
|
Remove unused BaseLibraryUpdateProvider
|
mopidy/backends/base.py
|
mopidy/backends/base.py
|
from __future__ import unicode_literals
import copy
class Backend(object):
#: Actor proxy to an instance of :class:`mopidy.audio.Audio`.
#:
#: Should be passed to the backend constructor as the kwarg ``audio``,
#: which will then set this field.
audio = None
#: The library provider. An instance of
#: :class:`~mopidy.backends.base.BaseLibraryProvider`, or :class:`None` if
#: the backend doesn't provide a library.
library = None
#: The playback provider. An instance of
#: :class:`~mopidy.backends.base.BasePlaybackProvider`, or :class:`None` if
#: the backend doesn't provide playback.
playback = None
#: The playlists provider. An instance of
#: :class:`~mopidy.backends.base.BasePlaylistsProvider`, or class:`None` if
#: the backend doesn't provide playlists.
playlists = None
#: List of URI schemes this backend can handle.
uri_schemes = []
# Because the providers is marked as pykka_traversible, we can't get() them
# from another actor, and need helper methods to check if the providers are
# set or None.
def has_library(self):
return self.library is not None
def has_playback(self):
return self.playback is not None
def has_playlists(self):
return self.playlists is not None
class BaseLibraryProvider(object):
"""
:param backend: backend the controller is a part of
:type backend: :class:`mopidy.backends.base.Backend`
"""
pykka_traversable = True
root_directory_name = None
"""
Name of the library's root directory in Mopidy's virtual file system.
*MUST be set by any class that implements :meth:`browse`.*
"""
def __init__(self, backend):
self.backend = backend
def browse(self, path):
"""
See :meth:`mopidy.core.LibraryController.browse`.
If you implement this method, make sure to also set
:attr:`root_directory_name`.
*MAY be implemented by subclass.*
"""
return []
# TODO: replace with search(query, exact=True, ...)
def find_exact(self, query=None, uris=None):
"""
See :meth:`mopidy.core.LibraryController.find_exact`.
*MAY be implemented by subclass.*
"""
pass
def lookup(self, uri):
"""
See :meth:`mopidy.core.LibraryController.lookup`.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
def refresh(self, uri=None):
"""
See :meth:`mopidy.core.LibraryController.refresh`.
*MAY be implemented by subclass.*
"""
pass
def search(self, query=None, uris=None):
"""
See :meth:`mopidy.core.LibraryController.search`.
*MAY be implemented by subclass.*
"""
pass
class BaseLibraryUpdateProvider(object):
uri_schemes = []
def load(self):
"""Loads the library and returns all tracks in it.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
def add(self, track):
"""Adds given track to library.
Overwrites any existing track with same URI.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
def remove(self, uri):
"""Removes given track from library.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
def commit(self):
"""Persist changes to library.
*MAY be implemented by subclass.*
"""
pass
class BasePlaybackProvider(object):
"""
:param audio: the audio actor
:type audio: actor proxy to an instance of :class:`mopidy.audio.Audio`
:param backend: the backend
:type backend: :class:`mopidy.backends.base.Backend`
"""
pykka_traversable = True
def __init__(self, audio, backend):
self.audio = audio
self.backend = backend
def pause(self):
"""
Pause playback.
*MAY be reimplemented by subclass.*
:rtype: :class:`True` if successful, else :class:`False`
"""
return self.audio.pause_playback().get()
def play(self, track):
"""
Play given track.
*MAY be reimplemented by subclass.*
:param track: the track to play
:type track: :class:`mopidy.models.Track`
:rtype: :class:`True` if successful, else :class:`False`
"""
self.audio.prepare_change()
self.change_track(track)
return self.audio.start_playback().get()
def change_track(self, track):
"""
Swith to provided track.
*MAY be reimplemented by subclass.*
:param track: the track to play
:type track: :class:`mopidy.models.Track`
:rtype: :class:`True` if successful, else :class:`False`
"""
self.audio.set_uri(track.uri).get()
return True
def resume(self):
"""
Resume playback at the same time position playback was paused.
*MAY be reimplemented by subclass.*
:rtype: :class:`True` if successful, else :class:`False`
"""
return self.audio.start_playback().get()
def seek(self, time_position):
"""
Seek to a given time position.
*MAY be reimplemented by subclass.*
:param time_position: time position in milliseconds
:type time_position: int
:rtype: :class:`True` if successful, else :class:`False`
"""
return self.audio.set_position(time_position).get()
def stop(self):
"""
Stop playback.
*MAY be reimplemented by subclass.*
:rtype: :class:`True` if successful, else :class:`False`
"""
return self.audio.stop_playback().get()
def get_time_position(self):
"""
Get the current time position in milliseconds.
*MAY be reimplemented by subclass.*
:rtype: int
"""
return self.audio.get_position().get()
class BasePlaylistsProvider(object):
"""
:param backend: backend the controller is a part of
:type backend: :class:`mopidy.backends.base.Backend`
"""
pykka_traversable = True
def __init__(self, backend):
self.backend = backend
self._playlists = []
@property
def playlists(self):
"""
Currently available playlists.
Read/write. List of :class:`mopidy.models.Playlist`.
"""
return copy.copy(self._playlists)
@playlists.setter # noqa
def playlists(self, playlists):
self._playlists = playlists
def create(self, name):
"""
See :meth:`mopidy.core.PlaylistsController.create`.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
def delete(self, uri):
"""
See :meth:`mopidy.core.PlaylistsController.delete`.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
def lookup(self, uri):
"""
See :meth:`mopidy.core.PlaylistsController.lookup`.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
def refresh(self):
"""
See :meth:`mopidy.core.PlaylistsController.refresh`.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
def save(self, playlist):
"""
See :meth:`mopidy.core.PlaylistsController.save`.
*MUST be implemented by subclass.*
"""
raise NotImplementedError
|
Python
| 0 |
@@ -2829,746 +2829,8 @@
s%0A%0A%0A
-class BaseLibraryUpdateProvider(object):%0A uri_schemes = %5B%5D%0A%0A def load(self):%0A %22%22%22Loads the library and returns all tracks in it.%0A%0A *MUST be implemented by subclass.*%0A %22%22%22%0A raise NotImplementedError%0A%0A def add(self, track):%0A %22%22%22Adds given track to library.%0A%0A Overwrites any existing track with same URI.%0A%0A *MUST be implemented by subclass.*%0A %22%22%22%0A raise NotImplementedError%0A%0A def remove(self, uri):%0A %22%22%22Removes given track from library.%0A%0A *MUST be implemented by subclass.*%0A %22%22%22%0A raise NotImplementedError%0A%0A def commit(self):%0A %22%22%22Persist changes to library.%0A%0A *MAY be implemented by subclass.*%0A %22%22%22%0A pass%0A%0A%0A
clas
|
0b0647a0537c3c325f5cf57cae933e06f7997ea9
|
add "_" prefix to plot names
|
crosscat/tests/timing_analysis.py
|
crosscat/tests/timing_analysis.py
|
import argparse
def _generate_parser():
default_num_rows = [100, 400, 1000, 4000]
default_num_cols = [8, 16, 32]
default_num_clusters = [1, 2]
default_num_views = [1, 2]
#
parser = argparse.ArgumentParser()
parser.add_argument('--dirname', default='timing_analysis', type=str)
parser.add_argument('--num_rows', nargs='+', default=default_num_rows, type=int)
parser.add_argument('--num_cols', nargs='+', default=default_num_cols, type=int)
parser.add_argument('--num_clusters', nargs='+', default=default_num_clusters, type=int)
parser.add_argument('--num_views', nargs='+', default=default_num_views, type=int)
parser.add_argument('--no_plots', action='store_true')
return parser
def _munge_args(args):
kwargs = args.__dict__.copy()
dirname = kwargs.pop('dirname')
generate_plots = not kwargs.pop('no_plots')
return kwargs, dirname, generate_plots
if __name__ == '__main__':
from crosscat.utils.general_utils import Timer, MapperContext, NoDaemonPool
from crosscat.utils.timing_test_utils import reader, read_all_configs, \
read_results, writer, runner, gen_configs
import crosscat.utils.timing_test_utils as ttu
import experiment_runner.experiment_utils as eu
# parse args
parser = _generate_parser()
args = parser.parse_args()
kwargs, dirname, generate_plots = _munge_args(args)
config_list = ttu.gen_configs(
kernel_list = ttu._kernel_list,
n_steps=[10],
**kwargs
)
with Timer('experiments') as timer:
with MapperContext(Pool=NoDaemonPool) as mapper:
# use non-daemonic mapper since run_geweke spawns daemonic processes
eu.do_experiments(config_list, runner, writer, dirname, mapper)
pass
pass
if generate_plots:
# read the data back in
all_configs = read_all_configs(dirname)
_all_results = read_results(all_configs, dirname)
is_same_shape = lambda result: result['start_dims'] == result['end_dims']
use_results = filter(is_same_shape, _all_results)
ttu.plot_results(use_results, dirname=dirname)
|
Python
| 0 |
@@ -2121,16 +2121,91 @@
esults)%0A
+ # add plot_prefix so plots show up at top of list of files/folders%0A
@@ -2233,16 +2233,33 @@
results,
+ plot_prefix='_',
dirname
|
42e0504933d6b9e55cdb6edb9931ba080baab136
|
add 408, replace print in test cases into assert
|
python/408_valid_word_abbreviation.py
|
python/408_valid_word_abbreviation.py
|
Python
| 0.000002 |
@@ -0,0 +1,1880 @@
+%22%22%22%0AGiven a non-empty string s and an abbreviation abbr, return whether the string%0Amatches with the given abbreviation.%0A%0AA string such as %22word%22 contains only the following valid abbreviations:%0A%0A%5B%22word%22, %221ord%22, %22w1rd%22, %22wo1d%22, %22wor1%22, %222rd%22, %22w2d%22, %22wo2%22, %221o1d%22, %221or1%22,%0A%22w1r1%22, %221o2%22, %222r1%22, %223d%22, %22w3%22, %224%22%5D %0A%0ANotice that only the above abbreviations are valid abbreviations of the string%0A%22word%22. Any other string is not a valid abbreviation of %22word%22.%0A%0ANote:%0A Assume s contains only lowercase letters and abbr contains only lowercase%0A letters and digits.%0A%0AExample 1:%0A Given s = %22internationalization%22, abbr = %22i12iz4n%22:%0A%0A Return true.%0A%0AExample 2:%0A Given s = %22apple%22, abbr = %22a2e%22:%0A%0A Return false.%0A%22%22%22%0Aclass Solution(object):%0A def validWordAbbreviation(self, word, abbr):%0A %22%22%22%0A :type word: str%0A :type abbr: str%0A :rtype: bool%0A %22%22%22%0A nums = set(%5Bstr(i) for i in range(10)%5D)%0A digits = %5B%5D%0A loc = -1%0A for c in abbr:%0A if c in nums:%0A if c == '0' and digits == %5B%5D:%0A return False%0A digits.append(c)%0A else:%0A if digits:%0A loc += int(%22%22.join(digits))%0A digits = %5B%5D%0A loc += 1%0A if loc %3E= len(word):%0A return False%0A if c != word%5Bloc%5D:%0A return False%0A if digits:%0A loc += int(%22%22.join(digits))%0A return loc == len(word) - 1%0A%0Aassert Solution().validWordAbbreviation(%22a%22, %222%22) == False%0Aassert Solution().validWordAbbreviation(%22word%22, %22w2d%22) == True%0Aassert Solution().validWordAbbreviation(%22internationalization%22, %22i12iz4n%22) == True%0Aassert Solution().validWordAbbreviation(%22apple%22, %22a3e%22) == True%0Aassert Solution().validWordAbbreviation(%22apple%22, %22a2e%22) == False%0Aprint(%22all cases passed%22)%0A
|
|
5e91e3b2c7e4cbc9f14067a832b87c336c0811e7
|
update add test for c4
|
redis_i_action/c4-process-log-and-replication/test.py
|
redis_i_action/c4-process-log-and-replication/test.py
|
Python
| 0 |
@@ -0,0 +1,1767 @@
+class TestCh04(unittest.TestCase):%0A%09def setUp(self):%0A%09import redis%0A%09self.conn = redis.Redis(db=15)%0A%09self.conn.flushdb()%0A%0A%09def tearDown(self):%0A%09%09self.conn.flushdb()%0A%09%09del self.conn%0A%09%09print%0A%09%09print%0A%0A%09def test_list_item(self):%0A%09%09import pprint%0A%09%09conn = self.conn%0A%09%09print %22We need to set up just enough state so that a user can list an item%22%0A%09%09seller = 'userX'%0A%09%09item = 'itemX'%0A%09%09conn.sadd('inventory:' + seller, item)%0A%09%09i = conn.smembers('inventory:' + seller)%0A%09%09print %22The user's inventory has:%22, i%0A%09%09self.assertTrue(i)%0A%09%09print%0A%09%09print %22Listing the item...%22%0A%09%09l = list_item(conn, item, seller, 10)%0A%09%09print %22Listing the item succeeded?%22, l%0A%09%09self.assertTrue(l)%0A%09%09r = conn.zrange('market:', 0, -1, withscores=True)%0A%09%09print %22The market contains:%22%0A%09%09pprint.pprint(r)%0A%09%09self.assertTrue(r)%0A%09%09self.assertTrue(any(x%5B0%5D == 'itemX.userX' for x in r))%0A%0A%09%09def test_purchase_item(self):%0A%09%09%09self.test_list_item()%0A%09%09%09conn = self.conn%0A%09%09%09print %22We need to set up just enough state so a user can buy an item%22%0A%09%09%09buyer = 'userY'%0A%09%09%09conn.hset('users:userY', 'funds', 125)%0A%09%09%09r = conn.hgetall('users:userY')%0A%09%09%09print %22The user has some money:%22, r%0A%09%09%09self.assertTrue(r)%0A%09%09%09self.assertTrue(r.get('funds'))%0A%09%09%09print%0A%09%09%09print %22Let's purchase an item%22%0A%09%09%09p = purchase_item(conn, 'userY', 'itemX', 'userX', 10)%0A%09%09%09print %22Purchasing an item succeeded?%22, p%0A%09%09%09self.assertTrue(p)%0A%09%09%09r = conn.hgetall('users:userY')%0A%09%09%09print %22Their money is now:%22, r%0A%09%09%09self.assertTrue(r)%0A%09%09%09i = conn.smembers('inventory:' + buyer)%0A%09%09%09print %22Their inventory is now:%22, i%0A%09%09%09self.assertTrue(i)%0A%09%09%09self.assertTrue('itemX' in 
i)%0A%09%09%09self.assertEquals(conn.zscore('market:', 'itemX.userX'), None)%0A%0A%09%09def test_benchmark_update_token(self):%0A%09%09%09benchmark_update_token(self.conn, 5)%0A%0Aif __name__ == '__main__':%0Aunittest.main()%0A
|
|
5fd556bc01fdd5d3c9690a56a70557fbd6eb73f8
|
print the to calc statistical test
|
MachineLearning/print_ensemble_precisions.py
|
MachineLearning/print_ensemble_precisions.py
|
Python
| 0.999999 |
@@ -0,0 +1,2523 @@
+#%0A# This program is distributed without any warranty and it%0A# can be freely redistributed for research, classes or private studies,%0A# since the copyright notices are not removed.%0A#%0A# This file just read the data to calculate the statistical test%0A#%0A# Jadson Santos - [email protected]%0A# %0A# to run this exemple install pyhton modules:%0A#%0A# pip3 install pandas%0A#%0A%0A# Python Data Analysis Library%0A# https://pandas.pydata.org%0Aimport pandas as pd%0A%0A# This module provides functions for calculating mathematical statistics of numeric (Real-valued) data.%0A# https://docs.python.org/3/library/statistics.html%0Aimport statistics %0A %0A%0A#%0A# PUT THE RESULT DIRECTORY AND ENSEMBLE ALGORITHM GENEREATED BY WEKA ON HERE%0A#%0A# read the CSV file with your data base and put into a Pandas DataFrame %0A# https://www.shanelynn.ie/using-pandas-dataframe-creating-editing-viewing-data-in-python/%0A#%0A%0Adirectory = '/Users/jadson/tmp/results/' # where are the files generated by weka%0A%0A#%0A# prints the data of all homogeneous ensemble %0A#%0Adef printHomogeneo():%0A for model in %5B'knn', 'ad', 'nb', 'mlp'%5D:%0A for ensemble in %5B'bagging', 'boosting', 'stacking_homogeneo'%5D:%0A%0A print(' -------------------- ')%0A print(model+' --%3E '+ensemble)%0A print(' -------------------- ')%0A%0A for num_classifiers in %5B10, 15, 20%5D:%0A df = pd.read_csv( directory+ensemble+'_'+model+'_'+str(num_classifiers)+'.csv' )%0A%0A #Getting the precision data%0A precision = df%5B'IR_precision'%5D.values%0A%0A # %7B0%7D is the num of argument of format function : %7B.4%7D sets the precision to 4 decimals.%0A for p in range(len(precision)):%0A print('%7B0:.4%7D'.format(precision%5Bp%5D)) %0A%0A%0A#%0A# prints the data of all heterogeneous ensemble %0A#%0Adef printHeterogeneo():%0A for ensemble in %5B'stacking_heterogeneo'%5D:%0A%0A print(' -------------------- ')%0A print(ensemble)%0A print(' -------------------- ')%0A%0A for model in %5B'MLP_AD', 'MLP_NB', 
'MLP_NB_AD', 'NB_AD'%5D:%0A%0A for num_classifiers in %5B10, 15, 20%5D:%0A df = pd.read_csv( directory+ensemble+'_'+model+'_'+str(num_classifiers)+'.csv' )%0A%0A #Getting the precision data%0A precision = df%5B'IR_precision'%5D.values%0A%0A # %7B0%7D is the num of argument of format function : %7B.4%7D sets the precision to 4 decimals.%0A for p in range(len(precision)):%0A print('%7B0:.4%7D'.format(precision%5Bp%5D)) %0A%0A%0A%0AprintHomogeneo()%0AprintHeterogeneo()%0A%0A
|
|
88d2ad776518d62a66fa3b8f7dd7520cff3debfc
|
Create bulk_parse.py
|
scripts/bulk_parse.py
|
scripts/bulk_parse.py
|
Python
| 0.000008 |
@@ -0,0 +1 @@
+%0A
|
|
10d71b1208175eac4af0a20d7ee0a8176c7829ef
|
add new rename script to prepend to *.c files
|
rename/prepend.py
|
rename/prepend.py
|
Python
| 0 |
@@ -0,0 +1,585 @@
+import os%0Aimport sys%0A%0Aif __name__ == '__main__':%0A if len(sys.argv) %3C 2:%0A print 'usage: %3Cpath%3E %3Cprepend%3E'%0A sys.exit()%0A%0Aexts=%5B'.c'%5D%0Achange_count = 0%0Afor root, dirs, files in os.walk(sys.argv%5B1%5D):%0A for filename in files:%0A if any(filename.lower().endswith(ext) for ext in exts):%0A if sys.argv%5B2%5D not in filename :%0A os.rename(os.path.join(root, filename), os.path.join(root, sys.argv%5B2%5D + filename))%0A print os.path.join(root, sys.argv%5B2%5D + filename)%0A change_count += 1%0A%0Aprint 'files changed: ', change_count%0A
|
|
02ad029840b2e770bc802fd7f8504498cb0f756d
|
Add `issubset` and `issuperset` tests
|
lib/ansible/plugins/test/mathstuff.py
|
lib/ansible/plugins/test/mathstuff.py
|
Python
| 0.000001 |
@@ -0,0 +1,1012 @@
+# (c) 2016, Ansible, Inc%0A#%0A# This file is part of Ansible%0A#%0A# Ansible is free software: you can redistribute it and/or modify%0A# it under the terms of the GNU General Public License as published by%0A# the Free Software Foundation, either version 3 of the License, or%0A# (at your option) any later version.%0A#%0A# Ansible is distributed in the hope that it will be useful,%0A# but WITHOUT ANY WARRANTY; without even the implied warranty of%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the%0A# GNU General Public License for more details.%0A#%0A# You should have received a copy of the GNU General Public License%0A# along with Ansible. If not, see %3Chttp://www.gnu.org/licenses/%3E.%0A%0A__metaclass__ = type%0A%0Adef issubset(a, b):%0A return set(a) %3C= set(b)%0A%0Adef issuperset(a, b):%0A return set(a) %3E= set(b)%0A%0Aclass TestModule(object):%0A ''' Ansible math jinja2 tests '''%0A%0A def tests(self):%0A return %7B%0A # set theory%0A 'issubset': issubset,%0A 'issuperset': issuperset,%0A %7D%0A
|
|
adede4415e36830485429f49b8476f655f3d4929
|
Add environment.py
|
tests/environment.py
|
tests/environment.py
|
Python
| 0.000003 |
@@ -0,0 +1,497 @@
+# -*- coding: UTF-8 -*-%0Aimport shutil%0Afrom steps.common_steps.common_environment import docker_setup%0A%0A%0Adef before_all(context):%0A docker_setup(context)%0A context.build_or_pull_image(skip_pull=True, skip_build=True)%0A%0A%0Adef after_scenario(context, scenario):%0A if 'KEEP_CONTAINER_AFTER_TEST' in context.config.userdata:%0A return%0A context.remove_container()%0A%0A%0Adef after_all(context):%0A if hasattr(context, 'temp_dir'):%0A shutil.rmtree(context.temp_dir) # FIXME catch exception%0A
|
|
bf86584829f56f91b363f251d77f3157f952db0f
|
Add tests for masking of data based on being within a range of values
|
tests/test_cyprep.py
|
tests/test_cyprep.py
|
Python
| 0 |
@@ -0,0 +1,1050 @@
+import unittest%0A%0Aimport numpy as np%0A%0Aimport yatsm._cyprep%0A%0A%0Aclass TestCyPrep(unittest.TestCase):%0A%0A @classmethod%0A def setUpClass(cls):%0A # Test data%0A n_band = 7%0A n_mask = 50%0A n_images = 1000%0A%0A cls.data = np.random.randint(%0A 0, 10000, size=(n_band, n_images)).astype(np.int32)%0A for b in range(n_band):%0A cls.data%5Bb, np.random.choice(np.arange(0, n_images),%0A size=n_mask, replace=False)%5D = 16000%0A%0A cls.mins = np.repeat(0, n_band).astype(np.int16)%0A cls.maxes = np.repeat(10000, n_band).astype(np.int16)%0A%0A def test_get_valid_mask(self):%0A truth = np.all(%5B((b %3E _min) & (b %3C _max)) for b, _min, _max in%0A zip(np.rollaxis(self.data, 0),%0A self.mins,%0A self.maxes)%5D, axis=0)%0A%0A np.testing.assert_equal(%0A truth,%0A yatsm._cyprep.get_valid_mask(self.data, self.mins, self.maxes))%0A%0A%0Aif __name__ == '__main__':%0A unittest.main()%0A
|
|
9c249d3f9d202632b7fd2241d39dfc2e180fd358
|
Add ledger tests
|
tests/test_ledger.py
|
tests/test_ledger.py
|
Python
| 0.000001 |
@@ -0,0 +1,1566 @@
+# -*- coding: utf-8 -*-%0Aimport pytest%0A%0Afrom accounts.ledger import Ledger%0A%0A# Database migrations run for each test in this module.%0A# See %60conftest.pytest_runtest*%60.%0ADB_MIGRATIONS = %5B'0003-create-balances', '0004-create-movements'%5D%0A%0A# Fixtures ###%0A%[email protected]%0Adef ledger(db):%0A return Ledger(db.connection)%0A%0A# Tests ###%0A%0ATOKEN = 'test'%0AAMOUNT = 100%0A%0Adef _get_balance(db, token):%0A db.execute(%22SELECT amount FROM balances WHERE token = %25s%22, %5Btoken%5D)%0A res = db.fetchone()%0A return res and res%5B0%5D%0A%0Adef test_balance(db, ledger):%0A assert ledger.balance(TOKEN) == 0%0A%0A db.execute(%22INSERT INTO balances (token, amount) VALUES (%25s, %25s)%22, %5BTOKEN, AMOUNT%5D)%0A db.connection.commit()%0A assert ledger.balance(TOKEN) == AMOUNT%0A%0Adef test_deposit(db, ledger):%0A # Account doesn't exist yet%0A assert _get_balance(db, TOKEN) is None%0A%0A assert ledger.deposit(TOKEN, AMOUNT) is True%0A assert _get_balance(db, TOKEN) == AMOUNT%0A%0A db.execute(%22SELECT amount FROM movements WHERE token = %25s%22, %5BTOKEN%5D)%0A assert db.fetchone()%5B0%5D == AMOUNT%0A%0Adef test_withdraw(db, ledger):%0A assert _get_balance(db, TOKEN) is None%0A%0A # Insufficient funds%0A assert ledger.withdraw(TOKEN, AMOUNT) is False%0A assert _get_balance(db, TOKEN) is None%0A%0A db.execute(%22INSERT INTO balances (token, amount) VALUES (%25s, %25s)%22, %5BTOKEN, AMOUNT+10%5D)%0A db.connection.commit()%0A assert ledger.withdraw(TOKEN, AMOUNT) is True%0A assert _get_balance(db, TOKEN) == 10%0A%0A db.execute(%22SELECT amount FROM movements WHERE token = %25s%22, %5BTOKEN%5D)%0A assert db.fetchone()%5B0%5D == -AMOUNT%0A
|
|
65f6b1101aba2086654f2ff0ff3e942f69d584b2
|
Add an application that returns spaCy similarity query
|
app/app.py
|
app/app.py
|
Python
| 0.000278 |
@@ -0,0 +1,618 @@
+from flask import Flask, jsonify%0Aimport spacy.en%0Afrom numpy import dot%0Afrom numpy.linalg import norm%0A%0Aapp = Flask(__name__)%0Anlp = spacy.en.English()%0A%0Adef cossim(a, b):%0A return dot(a, b) / (norm(a) * norm(b))%0A%[email protected]('/')%0Adef index():%0A return %22Hello, World!%22%0A%0A%[email protected]('/spaCy/api/similarity/%3Cword1%3E/%3Cword2%3E', methods=%5B'GET'%5D)%0Adef get_spacy_sim(word1, word2):%0A tok1 = nlp(word1)%5B0%5D%0A tok2 = nlp(word2)%5B0%5D%0A sim = cossim(tok1.repvec, tok2.repvec)%0A print type(sim)%0A return jsonify(%7B'word1': word1, 'word2': word2, 'similarity': float(sim)%7D)%0A%0A%0Aif __name__ == '__main__':%0A app.run(debug=True)%0A%0A
|
|
ddd4473f8edc4e7cfc503fc6cdbb570f33f224a4
|
Add Preprocessor module Edges to generate possible edges between two entities given the relation type
|
nala/preprocessing/edges.py
|
nala/preprocessing/edges.py
|
Python
| 0.000003 |
@@ -0,0 +1,2006 @@
+import abc%0Afrom nala.structures.data import Edge%0A%0Aclass EdgeGenerator:%0A %22%22%22%0A Abstract class for generating edges between two entities. Each edge represents%0A a possible relationship between the two entities%0A Subclasses that inherit this class should:%0A * Be named %5BName%5DEdgeGenerator%0A * Implement the abstract method generate%0A * Append new items to the list field %22edges%22 of each Part in the dataset%0A %22%22%22%0A%0A @abc.abstractmethod%0A def generate(self, dataset):%0A %22%22%22%0A :type dataset: nala.structures.data.Dataset%0A %22%22%22%0A return%0A%0A%0Aclass SimpleEdgeGenerator(EdgeGenerator):%0A %22%22%22%0A Simple implementation of generating edges between the two entities%0A if they are contained in the same sentence.%0A%0A Implements the abstract class EdgeGenerator.%0A%0A :type entity1_class: str%0A :type entity2_class: str%0A :type relation_type: str%0A %22%22%22%0A%0A def __init__(self, entity1_class, entity2_class, relation_type):%0A self.entity1_class = entity1_class%0A self.entity2_class = entity2_class%0A self.relation_type = relation_type%0A%0A def generate(self, dataset):%0A from itertools import product%0A for part in dataset.parts():%0A for ann_1, ann_2 in product(%0A (ann for ann in part.annotations if ann.class_id == self.entity1_class),%0A (ann for ann in part.annotations if ann.class_id == self.entity2_class)):%0A index_1 = part.get_sentence_index_for_annotation(ann_1)%0A index_2 = part.get_sentence_index_for_annotation(ann_2)%0A if index_1 == None:%0A print (ann_1)%0A x = input()%0A if index_2 == None:%0A print (ann_2)%0A x = input()%0A if index_1 == index_2 and index_1 != None:%0A part.edges.append(%0A Edge(ann_1, ann_2, self.relation_type,%0A part.get_sentence_string_array()%5Bindex_1%5D))%0A
|
|
0377cf9cc3c2460c2936ec9153edbdb196cff5bf
|
Add zdt agent
|
zephyrus/examples/zdt/agent.py
|
zephyrus/examples/zdt/agent.py
|
Python
| 0.000017 |
@@ -0,0 +1,583 @@
+import sys%0Afrom itertools import islice%0Afrom math import sqrt%0A%0Afrom zephyrus.agent import Agent%0Afrom zephyrus.message import Message%0A%0A%0Aclass ZDTAgent(Agent):%0A def mainloop(self):%0A msg = self.socket_receive.recv()%0A action = self.perceive(msg.content)%0A self.socket_send(str(action))%0A%0A def act(self, perceived):%0A f1 = perceived%5B0%5D%0A g = 1 + 9 * sum(islice(perceived, 1, None)) / (len(perceived) - 1)%0A zdt = 1 - sqrt(f1 / g)%0A return Message(%22agent%22, %22RESULT%22, zdt)%0A%0A%0Aif __name__ == '__main__':%0A ZDTAgent(1, *sys.argv%5B1:%5D).start()%0A
|
|
bc812daf7c99b34a3952d933666f240597eb835d
|
add a spider for Xin Shi Dai board, Ya Zhou catagory.
|
t66ySpider/t66ySpider/spiders/t66yXinshidaiYazhouSpider.py
|
t66ySpider/t66ySpider/spiders/t66yXinshidaiYazhouSpider.py
|
Python
| 0 |
@@ -0,0 +1,1123 @@
+# -*- coding: utf-8 -*-%0A%0Aimport scrapy%0A%0Afrom t66ySpider.items import T66YspiderXinshidaiItem%0A%0A%0Aclass t66yDagaierSpider(scrapy.Spider):%0A name = 'XinShiDaiYaZhou'%0A allowed_domains = %5B't66y.com'%5D%0A start_urls = %5B%22http://t66y.com/thread0806.php?fid=8&type=1%22%5D%0A unicode_next_page = u'%5Cu4e0b%5Cu4e00%5Cu9801'%0A%0A def parse(self, response):%0A thread_hrefs = response.selector.xpath('//h3/a/@href')%0A%0A for thread_href in thread_hrefs:%0A thread_url = response.urljoin(thread_href.extract())%0A yield scrapy.Request(thread_url, callback=self.parse_thread)%0A%0A next_page_href = response.selector.xpath(%0A %22//a%5Btext()='%25s'%5D/@href%22 %25 self.unicode_next_page)%5B0%5D%0A next_page_url = response.urljoin(next_page_href.extract())%0A%0A yield scrapy.Request(next_page_url, callback=self.parse)%0A%0A def parse_thread(self, response):%0A item = T66YspiderXinshidaiItem()%0A item%5B't_title'%5D = response.selector.xpath(%0A 'string(//title)')%5B0%5D.extract()%0A item%5B't_image_list'%5D = response.selector.xpath(%0A '//input/@src').extract()%0A yield item%0A
|
|
25d8cbfd4b59166ba748d5cd42fbcd7ffe925f0e
|
Allow using exogenous data in hierachical models #124
|
tests/hierarchical/test_hierarchy_AU_AllMethods_Exogenous_all_nodes.py
|
tests/hierarchical/test_hierarchy_AU_AllMethods_Exogenous_all_nodes.py
|
Python
| 0 |
@@ -0,0 +1,2004 @@
+import pandas as pd%0Aimport numpy as np%0Aimport pyaf.HierarchicalForecastEngine as hautof%0Aimport pyaf.Bench.TS_datasets as tsds%0A%0Aimport datetime%0A%0A#get_ipython().magic('matplotlib inline')%0A%0Adef create_exog_data(b1):%0A # fake exog data based on date variable%0A lDate1 = b1.mPastData%5B'Date'%5D%0A lDate2 = b1.mFutureData%5B'Date'%5D # not needed. exogfenous data are missing when not available.%0A lDate = lDate1.append(lDate2)%0A lExogenousDataFrame = pd.DataFrame()%0A lExogenousDataFrame%5B'Date'%5D = lDate%0A lExogenousDataFrame%5B'Date_second'%5D = lDate.dt.second%0A lExogenousDataFrame%5B'Date_minute'%5D = lDate.dt.minute%0A lExogenousDataFrame%5B'Date_hour'%5D = lDate.dt.hour%0A lExogenousDataFrame%5B'Date_dayofweek'%5D = lDate.dt.dayofweek%0A lExogenousDataFrame%5B'Date_day'%5D = lDate.dt.day%0A lExogenousDataFrame%5B'Date_dayofyear'%5D = lDate.dt.dayofyear%0A lExogenousDataFrame%5B'Date_month'%5D = lDate.dt.month%0A lExogenousDataFrame%5B'Date_week'%5D = lDate.dt.week%0A # a column in the exog data can be of any type%0A lExogenousDataFrame%5B'Date_day_name'%5D = lDate.dt.day_name()%0A lExogenousDataFrame%5B'Date_month_name'%5D = lDate.dt.month_name()%0A lExogenousVariables = %5Bcol for col in lExogenousDataFrame.columns if col.startswith('Date_')%5D%0A lExogenousData = (lExogenousDataFrame , lExogenousVariables) %0A return lExogenousData%0A%0A%0Ab1 = tsds.load_AU_hierarchical_dataset();%0Adf = b1.mPastData;%0A%0AlEngine = hautof.cHierarchicalForecastEngine()%0AlEngine.mOptions.mHierarchicalCombinationMethod = %5B%22BU%22 , 'TD' , 'MO' , 'OC'%5D;%0AlEngine.mOptions.mNbCores = 16%0AlEngine%0A%0A%0AH = b1.mHorizon;%0A%0A# lEngine.mOptions.enable_slow_mode();%0A# lEngine.mOptions.mDebugPerformance = True;%0A%0AlExogenousData = create_exog_data(b1)%0AlEngine.train(df , b1.mTimeVar , b1.mSignalVar, H, b1.mHierarchy, iExogenousData = lExogenousData);%0A%0AlEngine.getModelInfo();%0A#lEngine.standardPlots(%22outputs/AU%22);%0A%0Adfapp_in = 
df.copy();%0Adfapp_in.tail()%0A%0Adfapp_out = lEngine.forecast(dfapp_in, H);%0A#dfapp_out.to_csv(%22outputs/Hierarchical_AU_apply_out.csv%22)%0A
|
|
b135e8e473837909c6847f8a52711527409b5224
|
Add windows build tools
|
tools/build_mwpfh.py
|
tools/build_mwpfh.py
|
Python
| 0 |
@@ -0,0 +1,930 @@
+from __future__ import print_function%0A%0Aimport subprocess%0Aimport sys%0Aimport os%0A%0Apath = os.path.split(__file__)%5B0%5D%0Aif path:%0A%09os.chdir(path)%0A%0Aenvironments = %5B'26', '27', '32', '33', '34'%5D%0A%0Atarget = %22pypi%22 if %22--push%22 in sys.argv else %22test%22%0A%0Areturnvalues = %7B%7D%0A%0Adef run(pyver, cmds, target=None):%0A%09cmd = %5Br%22C:%5CPython%25s%5CPython.exe%22 %25 pyver, %22setup.py%22%5D + cmds%0A%09if target:%0A%09%09cmd += %5B%22-r%22, target%5D%0A%0A%09print(%22 %22.join(cmd), end=%22 %22)%0A%09retval = subprocess.call(cmd, stdout=open(%22%25s%25s.log%22 %25 (cmds%5B0%5D, pyver), 'w'), stderr=subprocess.STDOUT, cwd=%22..%22)%0A%09if not retval:%0A%09%09print(%22%5BOK%5D%22)%0A%09else:%0A%09%09print(%22%5BFAILED (%25i)%5D%22 %25 retval)%0A%09return retval%0A%0Arun(%2227%22, %5B%22register%22%5D, target)%0A%0Aif 'failed' in open('register27.log').read():%0A%09raise Exception%0A%0Afor pyver in environments:%0A%09print()%0A%09try:%0A%09%09os.unlink('mwparserfromhell/parser/_tokenizer.pyd')%0A%09except WindowsError:%0A%09%09pass%0A%09%0A%09if run(pyver, %5B%22test%22%5D) == 0:%0A%09%09run(pyver, %5B%22bdist_wheel%22, %22upload%22%5D, target)
|
|
002842c4d7db431a4dedc067ef54dab8747d70f4
|
add debug statement
|
library/pyjamas/media/Video.mshtml.py
|
library/pyjamas/media/Video.mshtml.py
|
class Video(Media):
def __init__(self, src=None, **kwargs):
print "create object"
obj = DOM.createElement("OBJECT")
DOM.setAttribute(obj, "TYPE", "application/x-mplayer2")
#DOM.setAttribute(obj, "type", "application/x-oleobject")
DOM.setAttribute(obj, "classid",
#"CLSID:22D6F312-B0F6-11D0-94AB-0080C74C7E95")
"CLSID:6BF52A52-394A-11d3-B153-00C04F79FAA6")
print "set element"
self.setElement(obj)
print "widget init"
Media.__init__(self, **kwargs)
print "setSrc"
if src:
self.setSrc(src)
#self.setID("MediaPlayer")
self.dispparam = DOM.createElement("PARAM")
DOM.setAttribute(self.dispparam, "name", "ShowDisplay")
DOM.setBooleanAttribute(self.dispparam, "VALUE", "false")
self.getElement().appendChild(self.dispparam)
def setSrc(self, src):
print "setSrc", src
#self.srcparam = DOM.createElement("PARAM")
#DOM.setAttribute(self.srcparam, "name", "FileName")
#DOM.setAttribute(self.srcparam, "VALUE", src)
#self.getElement().appendChild(self.srcparam)
obj = self.getElement()
DOM.setAttribute(obj, "URL", src)
#obj.URL = src
def setControls(self, controls):
print "setControls", controls
self.ctrlparam = DOM.createElement("PARAM")
DOM.setAttribute(self.ctrlparam, "name", "ShowControls")
DOM.setBooleanAttribute(self.ctrlparam, "VALUE",
controls and "true" or "false")
self.getElement().appendChild(self.ctrlparam)
def setStatusbar(self, statusbar):
print "setstatus", statusbar
self.statparam = DOM.createElement("PARAM")
DOM.setAttribute(self.statparam, "name", "ShowStatusBar")
DOM.setBooleanAttribute(self.statparam, "VALUE",
statusbar and "true" or "false")
self.getElement().appendChild(self.statparam)
def setLoop(self, autorewind):
print "autorewind", autorewind
self.loopparam = DOM.createElement("PARAM")
DOM.setAttribute(self.loopparam, "name", "autorewind")
DOM.setBooleanAttribute(self.loopparam, "VALUE",
autorewind and "true" or "false")
self.getElement().appendChild(self.loopparam)
def setAutoplay(self, autostart):
print "autoplay", autostart
self.playparam = DOM.createElement("PARAM")
DOM.setAttribute(self.playparam, "name", "autostart")
DOM.setBooleanAttribute(self.playparam, "VALUE",
autostart and "true" or "false")
self.getElement().appendChild(self.playparam)
|
Python
| 0.000018 |
@@ -1236,16 +1236,39 @@
ement()%0A
+ print dir(obj)%0A
|
012acdc7a280b307bbb110449dcfee5d05a77e38
|
Create new package (#6379)
|
var/spack/repos/builtin/packages/r-chemometrics/package.py
|
var/spack/repos/builtin/packages/r-chemometrics/package.py
|
Python
| 0 |
@@ -0,0 +1,2392 @@
+##############################################################################%0A# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.%0A# Produced at the Lawrence Livermore National Laboratory.%0A#%0A# This file is part of Spack.%0A# Created by Todd Gamblin, [email protected], All rights reserved.%0A# LLNL-CODE-647188%0A#%0A# For details, see https://github.com/spack/spack%0A# Please also see the NOTICE and LICENSE files for our notice and the LGPL.%0A#%0A# This program is free software; you can redistribute it and/or modify%0A# it under the terms of the GNU Lesser General Public License (as%0A# published by the Free Software Foundation) version 2.1, February 1999.%0A#%0A# This program is distributed in the hope that it will be useful, but%0A# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF%0A# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and%0A# conditions of the GNU Lesser General Public License for more details.%0A#%0A# You should have received a copy of the GNU Lesser General Public%0A# License along with this program; if not, write to the Free Software%0A# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA%0A##############################################################################%0Afrom spack import *%0A%0A%0Aclass RChemometrics(RPackage):%0A %22%22%22R companion to the book %22Introduction to Multivariate Statistical Analysis%0A in Chemometrics%22 written by K. Varmuza and P. 
Filzmoser (2009).%22%22%22%0A%0A homepage = %22https://cran.r-project.org/web/packages/chemometrics/index.html%22%0A url = %22https://cran.r-project.org/src/contrib/chemometrics_1.4.2.tar.gz%22%0A list_url = %22https://cran.rstudio.com/src/contrib/Archive/chemometrics%22%0A%0A version('1.4.2', '8137b0ca4004add9cc2ea81d2c54427f')%0A version('1.4.1', '1e5a89442bb4a61db0da884eedd74fc2')%0A version('1.3.9', '2b619791896db1513ca3d714acb68af3')%0A version('1.3.8', '7fad828bd094b5485fbf20bdf7d3d0d1')%0A version('1.3.7', 'a9e2f32efb1545421dd96185fd849184')%0A%0A depends_on('[email protected]:3.4.9')%0A depends_on('r-rpart', type=('build', 'run'))%0A depends_on('r-mclust', type=('build', 'run'))%0A depends_on('r-lars', type=('build', 'run'))%0A depends_on('r-robustbase', type=('build', 'run'))%0A depends_on('r-e1071', type=('build', 'run'))%0A depends_on('r-pls', type=('build', 'run'))%0A depends_on('r-som', type=('build', 'run'))%0A depends_on('r-pcapp', type=('build', 'run'))%0A
|
|
aa78a2670766b0a5e093a1876cb402ed513573bd
|
Add script to explore parameters units
|
openfisca_france/scripts/parameters/explore_parameters_unit.py
|
openfisca_france/scripts/parameters/explore_parameters_unit.py
|
Python
| 0 |
@@ -0,0 +1,1857 @@
+# -*- coding: utf-8 -*-%0A%0A%0Afrom openfisca_core.parameters import ParameterNode, Scale%0Afrom openfisca_france import FranceTaxBenefitSystem%0A%0A%0Atax_benefit_system = FranceTaxBenefitSystem()%0Aparameters = tax_benefit_system.parameters%0A%0A%0Adef get_parameters_by_unit(parameter, parameters_by_unit = None):%0A if parameters_by_unit is None:%0A parameters_by_unit = dict(%0A scale = list(),%0A none = list(),%0A currency = list(),%0A rate = list(),%0A year = list(),%0A )%0A for name, sub_parameter in parameter.children.items():%0A if isinstance(sub_parameter, ParameterNode):%0A get_parameters_by_unit(sub_parameter, parameters_by_unit)%0A else:%0A if isinstance(sub_parameter, Scale):%0A parameters_by_unit%5B'scale'%5D.append(sub_parameter)%0A elif sub_parameter.unit is None:%0A parameters_by_unit%5B'none'%5D.append(sub_parameter)%0A elif sub_parameter.unit == %22/1%22:%0A parameters_by_unit%5B'rate'%5D.append(sub_parameter)%0A elif sub_parameter.unit == %22currency%22:%0A parameters_by_unit%5B'currency'%5D.append(sub_parameter)%0A elif sub_parameter.unit == %22year%22:%0A parameters_by_unit%5B'year'%5D.append(sub_parameter)%0A else:%0A raise ValueError(%22Parameter %7B%7D has a stange unit %7B%7D%22.format(%0A sub_parameter.name, sub_parameter.unit))%0A%0A return parameters_by_unit%0A%0A%0Aif __name__ == '__main__':%0A parameters_by_unit = get_parameters_by_unit(parameters)%0A print('Distribution of parameters types:')%0A for type_, sub_parameters in parameters_by_unit.items():%0A print(type_, len(parameters_by_unit%5Btype_%5D))%0A%0A print('%5Cn')%0A print('List of parameters with no units')%0A%0A for param in parameters_by_unit%5B'none'%5D:%0A print (param.name)%0A%0A
|
|
e095b6a76ac36255983d8c69d4899d64178e0ef3
|
Add segment_euclidean_length tests module
|
tests/plantcv/morphology/test_segment_euclidean_length.py
|
tests/plantcv/morphology/test_segment_euclidean_length.py
|
Python
| 0.000001 |
@@ -0,0 +1,902 @@
+import pytest%0Aimport cv2%0Aimport numpy as np%0Afrom plantcv.plantcv import outputs%0Afrom plantcv.plantcv.morphology import segment_euclidean_length%0A%0A%0Adef test_segment_euclidean_length(morphology_test_data):%0A # Clear previous outputs%0A outputs.clear()%0A skeleton = cv2.imread(morphology_test_data.skel_img, -1)%0A _ = segment_euclidean_length(segmented_img=skeleton,%0A objects=morphology_test_data.load_segments(morphology_test_data.segments_file, %22leaves%22))%0A assert len(outputs.observations%5B'default'%5D%5B'segment_eu_length'%5D%5B'value'%5D) == 4%0A%0A%0Adef test_segment_euclidean_length_bad_input():%0A skel = np.zeros((10, 10), dtype=np.uint8)%0A edges = %5Bnp.array(%5B%5B%5B5, 3%5D%5D, %5B%5B4, 4%5D%5D, %5B%5B3, 5%5D%5D, %5B%5B4, 6%5D%5D, %5B%5B5, 7%5D%5D, %5B%5B6, 6%5D%5D, %5B%5B7, 5%5D%5D, %5B%5B6, 4%5D%5D%5D, dtype=np.int32)%5D%0A with pytest.raises(RuntimeError):%0A _ = segment_euclidean_length(segmented_img=skel, objects=edges)%0A
|
|
26ab37868e67b5b815cf8df67cc04876ff44c148
|
Add file for Nongrammar entities tests
|
tests/rules_tests/isValid_tests/NongrammarEntitiesTest.py
|
tests/rules_tests/isValid_tests/NongrammarEntitiesTest.py
|
Python
| 0 |
@@ -0,0 +1,287 @@
+#!/usr/bin/env python%0A%22%22%22%0A:Author Patrik Valkovic%0A:Created 23.06.2017 16:39%0A:Licence GNUv3%0APart of grammpy%0A%0A%22%22%22%0A%0Afrom unittest import main, TestCase%0Afrom grammpy import Rule%0Afrom .grammar import *%0A%0A%0Aclass NongrammarEntitiesTest(TestCase):%0A pass%0A%0A%0Aif __name__ == '__main__':%0A main()
|
|
e80ec7adc6fe71310e1c2adba720be9640a49d0f
|
test code for midiGenerator
|
src/test4.py
|
src/test4.py
|
Python
| 0 |
@@ -0,0 +1,476 @@
+import midiGenerator%0Agenerator = midiGenerator.MidiGenerator(200,1)%0Achannel = midiGenerator.Channel()%0Anote = midiGenerator.Note(43,100,200)%0Achannel.addNote(note)%0Achannel.addNote(midiGenerator.Note(45,200,300))%0Achannel.addNote(midiGenerator.Note(57,300,400))%0Achannel.addNote(midiGenerator.Note(38,400,500))%0Achannel.addNote(midiGenerator.Note(33,500,600))%0Achannel.addNote(midiGenerator.Note(45,600,700))%0Achannel.endTrack()%0Agenerator.addChannel(channel)%0Agenerator.save(%22t.midi%22)%0A
|
|
3757ec1444c55049009bc7d40490fa4a1a4f33d4
|
Fix send_response implementation
|
run_dev_server.py
|
run_dev_server.py
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import optparse
import os
import sys
import time
from build import generate_deps_js_contents as deps_generator
import SocketServer
import SimpleHTTPServer
import BaseHTTPServer
# Port used when --port is not given on the command line.
DEFAULT_PORT = 8003
# Minimum number of seconds between regenerations of deps.js.
DEPS_CHECK_DELAY = 30
# Directory layout, resolved relative to this script's location.
toplevel_dir = os.path.abspath(os.path.dirname(__file__))
src_dir = os.path.join(toplevel_dir, 'src')
test_data_dir = os.path.join(toplevel_dir, 'test_data')
class Handler(SimpleHTTPServer.SimpleHTTPRequestHandler):
  """Request handler for the dev server.

  Serves three dynamic endpoints -- /json/tests, /json/examples and
  /deps.js -- and falls back to plain static-file serving for any
  other path.
  """

  def __init__(self, *args, **kwargs):
    SimpleHTTPServer.SimpleHTTPRequestHandler.__init__(self, *args, **kwargs)

  def do_GET_json_tests(self):
    """Responds with a JSON array of every *_test.html path under src/."""
    def is_test(x):
      basename = os.path.basename(x)
      if basename.startswith('.'):
        return False
      if basename.endswith('_test.html'):
        return True
      return False

    test_filenames = []
    for dirpath, dirnames, filenames in os.walk(src_dir):
      for f in filenames:
        x = os.path.join(dirpath, f)
        y = '/' + os.path.relpath(x, toplevel_dir)
        if is_test(y):
          test_filenames.append(y)
    test_filenames.sort()

    tests_as_json = json.dumps(test_filenames)
    self.send_response(200)
    self.send_header('Content-Type', 'application/json')
    self.send_header('Cache-Control', 'no-cache')
    self.send_header('Content-Length', len(tests_as_json))
    self.end_headers()
    self.wfile.write(tests_as_json)

  def send_response(self, code):
    # BUGFIX: this used to call super(Handler, self).send_resposne(code) --
    # the method name was misspelled, and super() cannot be used anyway
    # because BaseHTTPRequestHandler is an old-style class in Python 2.
    # Delegate explicitly to the parent implementation instead.
    SimpleHTTPServer.SimpleHTTPRequestHandler.send_response(self, code)
    if code == 200:
      # Successful dynamic responses must never be cached by the browser.
      self.send_header('Cache-Control', 'no-cache')

  def do_GET_example_files(self):
    """Responds with a JSON array of the file names under test_data/."""
    data_files = []
    for dirpath, dirnames, filenames in os.walk(test_data_dir):
      for f in filenames:
        data_files.append(f)
    data_files.sort()

    files_as_json = json.dumps(data_files)
    self.send_response(200)
    self.send_header('Content-Type', 'application/json')
    self.send_header('Cache-Control', 'no-cache')
    self.send_header('Content-Length', len(files_as_json))
    self.end_headers()
    self.wfile.write(files_as_json)

  def do_GET_deps(self):
    """Serves deps.js, regenerating it at most every DEPS_CHECK_DELAY secs."""
    current_time = time.time()
    if self.server.next_deps_check < current_time:
      self.log_message('Regenerating ' + self.path)
      self.server.deps = deps_generator.generate_deps_js()
      self.server.next_deps_check = current_time + DEPS_CHECK_DELAY

    self.send_response(200)
    self.send_header('Content-Type', 'application/javascript')
    self.send_header('Cache-Control', 'no-cache')
    self.send_header('Content-Length', len(self.server.deps))
    self.end_headers()
    self.wfile.write(self.server.deps)

  def do_GET(self):
    """Dispatches the dynamic endpoints, then falls back to static files."""
    if self.path == '/json/examples':
      self.do_GET_example_files()
      return

    if self.path == '/json/tests':
      self.do_GET_json_tests()
      return

    if self.path == '/deps.js':
      self.do_GET_deps()
      return

    return SimpleHTTPServer.SimpleHTTPRequestHandler.do_GET(self)

  def log_error(self, format, *args):
    # Favicon requests are expected to 404; don't spam the console.
    if self.path == '/favicon.ico':
      return
    self.log_message("While processing %s: ", self.path)

  def log_request(self, code='-', size='-'):
    # Dont spam the console unless it is important.
    pass
class Server(SocketServer.ThreadingMixIn, BaseHTTPServer.HTTPServer):
  """Threaded HTTP server that also caches the generated deps.js payload."""

  def __init__(self, *args, **kwargs):
    # Cached deps.js text and the time after which it must be regenerated;
    # -1 forces regeneration on the first /deps.js request.
    self.deps = None
    self.next_deps_check = -1
    BaseHTTPServer.HTTPServer.__init__(self, *args, **kwargs)
def Main(args):
  """Parses --port from the command line and serves forever."""
  option_parser = optparse.OptionParser()
  option_parser.add_option('--port', action='store', type='int',
                           default=DEFAULT_PORT, help='Port to serve from')
  options, args = option_parser.parse_args()

  httpd = Server(('', options.port), Handler)
  sys.stderr.write("Now running on http://localhost:%i\n" % options.port)
  httpd.serve_forever()
if __name__ == '__main__':
  # Run from the repo root so the relative static-file paths resolve.
  os.chdir(toplevel_dir)
  sys.exit(Main(sys.argv[1:]))
|
Python
| 0.00001 |
@@ -1602,28 +1602,49 @@
-super(Handler, self)
+SimpleHTTPServer.SimpleHTTPRequestHandler
.sen
@@ -1654,12 +1654,18 @@
espo
-sne(
+nse(self,
code
|
e195aef0fa870bf0f471be99a0144a59fdcc5b97
|
Create norm_distri_of_proj_valu.py
|
norm_distri_of_proj_valu.py
|
norm_distri_of_proj_valu.py
|
Python
| 0.000005 |
@@ -0,0 +1,509 @@
+import pandas as pd%0Aimport numpy as np%0Aimport matplotlib.pyplot as plt%0A%25matplotlib inline%0Aimport math%0A%0Ax_train = pd.read_csv(%22Train.csv%22)%0Ax_test = pd.read_csv(%22Test.csv%22)%0A%0Adef log_method(x):%0A if x == 0:%0A return 0%0A return math.log(x,2)%0A%0Atest = x_train%5B%22Project_Valuation%22%5D.order()%0A%0Atest = test.apply(lambda x: log_method(x))%0A%0Amean = sum(test) / len(test)%0Avarience = sum((average - value) ** 2 for value in test) / len(test)%0Asigma = math.sqrt(variance)%0Aplt.plot(test,mlab.normpdf(test,mean,sigma))%0A
|
|
3724e828ea7c0aa2a910db16c1392390f7c9f7a8
|
add a simple schema building tool
|
spyne/test/interface/build_schema.py
|
spyne/test/interface/build_schema.py
|
Python
| 0 |
@@ -0,0 +1,268 @@
+#!/usr/bin/env python%0A%0A# This can be used to debug invalid Xml Schema documents.%0A%0Aimport sys%0A%0Afrom lxml import etree%0A%0Aif len(sys.argv) != 2:%0A print %22Usage: %25s %3Cpath_to_xsd_file%3E%22 %25 sys.argv%5B0%5D%0A sys.exit(1)%0A%0Af = open(sys.argv%5B1%5D)%0A%0Aetree.XMLSchema(etree.parse(f))%0A
|
|
56a8250baa197285a5727dfbca12adaab81238ab
|
Add a snippet.
|
python/tkinter/python3/menu_checkbutton.py
|
python/tkinter/python3/menu_checkbutton.py
|
Python
| 0.000002 |
@@ -0,0 +1,2209 @@
+#!/usr/bin/env python3%0A# -*- coding: utf-8 -*-%0A%0A# Copyright (c) 2016 J%C3%A9r%C3%A9mie DECOCK (http://www.jdhp.org)%0A%0A# Permission is hereby granted, free of charge, to any person obtaining a copy%0A# of this software and associated documentation files (the %22Software%22), to deal%0A# in the Software without restriction, including without limitation the rights%0A# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell%0A# copies of the Software, and to permit persons to whom the Software is%0A# furnished to do so, subject to the following conditions:%0A%0A# The above copyright notice and this permission notice shall be included in%0A# all copies or substantial portions of the Software.%0A %0A# THE SOFTWARE IS PROVIDED %22AS IS%22, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR%0A# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,%0A# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE%0A# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER%0A# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,%0A# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN%0A# THE SOFTWARE.%0A%0A# See also: http://effbot.org/tkinterbook/checkbutton.htm%0A%0Aimport tkinter as tk%0A%0Aroot = tk.Tk()%0A%0Atest_var = tk.IntVar()%0A#test_var.set(1) # Initialize%0A%0Adef callback():%0A print(%22var = %22, test_var.get())%0A%0A# CREATE A TOPLEVEL MENU ######################################################%0A%0Amenubar = tk.Menu(root)%0A%0A# CREATE A PULLDOWN MENU ######################################################%0A#%0A# tearoff:%0A# %22tearoff=1%22 permet %C3%A0 l'utilisateur de d%C3%A9tacher le sous menu dans une%0A# fen%C3%AAtre %C3%A0 part.%0A%0Afile_menu = tk.Menu(menubar, tearoff=0)%0Afile_menu.add_checkbutton(label=%22Checkbutton test%22, variable=test_var, command=callback)%0A%0Amenubar.add_cascade(label=%22Test%22, menu=file_menu)%0A%0A# DISPLAY 
THE MENU ############################################################%0A#%0A# The config method is used to attach the menu to the root window. The%0A# contents of that menu is used to create a menubar at the top of the root%0A# window. There is no need to pack the menu, since it is automatically%0A# displayed by Tkinter.%0A%0Aroot.config(menu=menubar)%0A%0Aroot.mainloop()%0A%0A
|
|
fa3a02e6660ce556defc2f2c6008c6eb24eb71c1
|
Add a simple sampler for playing wav files triggered by note on messages
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Sampler.py
|
Sketches/JT/Jam/library/trunk/Kamaelia/Apps/Jam/Audio/Sampler.py
|
Python
| 0 |
@@ -0,0 +1,3108 @@
+import time%0Aimport wave%0Aimport pygame%0Aimport numpy%0Aimport Axon%0Afrom Axon.SchedulingComponent import SchedulingComponent%0A%0Aclass WavVoice(SchedulingComponent):%0A bufferSize = 1024%0A def __init__(self, fileName, **argd):%0A super(WavVoice, self).__init__(**argd)%0A%0A self.on = False%0A%0A self.wavFile = wave.open(fileName)%0A self.sampleRate = self.wavFile.getframerate()%0A self.period = float(self.bufferSize)/self.sampleRate%0A%0A self.frame = 0%0A%0A self.lastSendTime = time.time()%0A self.scheduleAbs(%22Send%22, self.lastSendTime + self.period)%0A%0A def main(self):%0A while 1:%0A if self.dataReady(%22inbox%22):%0A address, arguments = self.recv(%22inbox%22)%0A address = address.split(%22/%22)%5B-1%5D%0A if address == %22On%22:%0A self.on = True%0A self.wavFile.rewind()%0A self.frame = 0%0A if address == %22Off%22:%0A self.on = False%0A%0A if self.dataReady(%22event%22):%0A self.recv(%22event%22)%0A if self.on:%0A if self.frame %3C self.wavFile.getnframes():%0A sample = self.wavFile.readframes(self.bufferSize)%0A sample = numpy.frombuffer(sample, dtype=%22int16%22)%0A self.frame += len(sample)%0A if len(sample) %3C self.bufferSize:%0A # Pad with zeroes%0A padSize = self.bufferSize - len(sample)%0A sample = numpy.append(sample, numpy.zeros(padSize))%0A # Convert to float%0A sample = sample.astype(%22float64%22)%0A # Scale to -1 - 1%0A sample /= 2**(8 * self.wavFile.getsampwidth() - 1)%0A else:%0A sample = numpy.zeros(self.bufferSize)%0A else:%0A sample = numpy.zeros(self.bufferSize)%0A self.send(sample, %22outbox%22)%0A self.lastSendTime += self.period%0A self.scheduleAbs(%22Send%22, self.lastSendTime + self.period)%0A%0A if not self.anyReady():%0A self.pause()%0A%0Aif __name__ == %22__main__%22:%0A from Kamaelia.Chassis.Pipeline import Pipeline%0A from Kamaelia.Apps.Jam.Util.Numpy import TypeConverter%0A from Kamaelia.Codec.Vorbis import AOAudioPlaybackAdaptor%0A from Kamaelia.Util.PureTransformer import PureTransformer%0A from 
Kamaelia.Apps.Jam.UI.StepSequencer import StepSequencer%0A from Kamaelia.Apps.Jam.Audio.Synth import Synth%0A from Kamaelia.Apps.Jam.Audio.Polyphony import Targetter%0A%0A files = %5B%22Ride%22, %22HH%22, %22Snare%22, %22Kick%22%5D%0A files = %5B%22/home/joe/Desktop/%25s.wav%22%25fileName for fileName in files%5D%0A def voiceGenerator():%0A for i in range(4):%0A yield WavVoice(files%5Bi%5D)%0A%0A Pipeline(StepSequencer(stepsPerBeat=4), Synth(voiceGenerator, polyphoniser=Targetter, polyphony=4), PureTransformer(lambda x:x*(2**15-1)), TypeConverter(type=%22int16%22), AOAudioPlaybackAdaptor()).run()%0A %0A %0A %0A
|
|
bff1e954213fb7592505c94294eb3800a8b199c3
|
Update patternMatch.py
|
TechInterviews/Python/patternMatch.py
|
TechInterviews/Python/patternMatch.py
|
import sys
import re
def stripSlashes(path):
    """Return *path* without a single leading and/or trailing '/'."""
    start = 1 if path.startswith('/') else 0
    end = len(path) - 1 if path.endswith('/') else len(path)
    return path[start:end]
def findBestWildCardMatch(patterns):
    # TODO: not yet implemented -- currently returns None.
    # Intended behaviour: among several wildcard patterns that all match a
    # path, pick the best one. The best match is the pattern whose '*'
    # wildcards appear rightmost; compare the positions of '*' in each
    # pattern to decide.
    pass
def getRePattern(pattern):
    """Translate a comma-separated wildcard pattern into a regex string.

    ',' becomes the path separator '/', and '*' becomes a character class
    matching a single path component ([a-zA-Z0-9_]*).
    """
    return pattern.translate(str.maketrans({',': '/', '*': '[a-zA-Z0-9_]*'}))
def findBestMatch(patterns, paths):
    """Return, for each path, the pattern that matches it.

    The result list has exactly one entry per path: the single matching
    pattern, the result of findBestWildCardMatch() when several patterns
    match, or the string "NO MATCH FOUND" when none do.
    """
    # Hoist the pattern -> regex translation out of the per-path loop;
    # the patterns do not change between iterations.
    compiled = [(pattern, re.compile(getRePattern(pattern)))
                for pattern in patterns]

    result = []
    for path in paths:
        stripped = stripSlashes(path)
        matched = [pattern for pattern, regex in compiled
                   if regex.search(stripped)]
        if len(matched) > 1:
            result.append(findBestWildCardMatch(matched))
        elif len(matched) == 1:
            # BUGFIX: a unique match used to be silently dropped, leaving
            # the result list misaligned with the input paths.
            result.append(matched[0])
        else:
            result.append("NO MATCH FOUND")
    return result
#['foot', 'fell', 'fastest']
# Example to call this program: python34 patternMatch.py <input_file> output_file
def main(args):
    """Read patterns and paths from args[1] and print the best matches.

    Expected input format: an integer N, then N pattern lines, then an
    integer M, then M path lines.  args[2] names an output file (currently
    only created/truncated; results still go to stdout).
    """
    # BUGFIX: use 'with' so both files are closed even if parsing raises
    # (the original leaked the handles on any exception).
    with open(args[1], 'r') as input_file, open(args[2], 'w') as output_file:
        pattern_count = int(input_file.readline())
        pattern_list = [input_file.readline() for _ in range(pattern_count)]
        path_count = int(input_file.readline())
        path_list = [input_file.readline() for _ in range(path_count)]
        print(findBestMatch(pattern_list, path_list))
# Script entry point: python34 patternMatch.py <input_file> <output_file>
if __name__ == '__main__':
    main(sys.argv)
|
Python
| 0 |
@@ -15,16 +15,62 @@
ort re%0A%0A
+# Strip only the beginning and ending slashes%0A
def stri
@@ -240,24 +240,180 @@
(patterns):%0A
+ #The best match is wildcards that are rightmost%0A #Get the positions of the * and add them to get the largest number to figure out which is rightmost%0A
pass%0A%0Ade
|
bbe0cf1666b4706973bfba73ed77126581026057
|
add new test case to test add image from local file system.
|
integrationtest/vm/virt_plus/other/test_add_local_image.py
|
integrationtest/vm/virt_plus/other/test_add_local_image.py
|
Python
| 0 |
@@ -0,0 +1,1865 @@
+'''%0D%0A%0D%0ANew Integration Test for add image from MN local URI.%0D%0A%0D%0AThe file should be placed in MN.%0D%0A%0D%0A@author: Youyk%0D%0A'''%0D%0A%0D%0Aimport os%0D%0Aimport time%0D%0Aimport zstackwoodpecker.test_util as test_util%0D%0Aimport zstackwoodpecker.test_state as test_state%0D%0Aimport zstackwoodpecker.test_lib as test_lib%0D%0Aimport zstackwoodpecker.operations.resource_operations as res_ops%0D%0Aimport zstackwoodpecker.operations.image_operations as img_ops%0D%0Aimport zstackwoodpecker.zstack_test.zstack_test_image as zstack_image_header%0D%0A%0D%0Atest_stub = test_lib.lib_get_test_stub()%0D%0Atest_obj_dict = test_state.TestStateDict()%0D%0Atest_image = '/tmp/zstack_wp_test_local_uri.img'%0D%0A%0D%0Adef test():%0D%0A os.system('dd if=/dev/zero of=%25s bs=1M count=1 seek=300' %25 test_image)%0D%0A time.sleep(10)%0D%0A image_name = 'test-image-%25s' %25 time.time()%0D%0A image_option = test_util.ImageOption()%0D%0A image_option.set_name(image_name)%0D%0A image_option.set_description('test image which is upload from local filesystem.')%0D%0A image_option.set_url('file://%25s' %25 test_image)%0D%0A bs = res_ops.query_resource(res_ops.BACKUP_STORAGE)%5B0%5D%0D%0A image_option.set_backup_storage_uuid_list(%5Bbs.uuid%5D)%0D%0A image_option.set_format('raw')%0D%0A image_option.set_mediaType('RootVolumeTemplate')%0D%0A image_inv = img_ops.add_root_volume_template(image_option)%0D%0A time.sleep(10)%0D%0A image = zstack_image_header.ZstackTestImage()%0D%0A image.set_creation_option(image_option)%0D%0A image.set_image(image_inv)%0D%0A test_obj_dict.add_image(image)%0D%0A image.check()%0D%0A%0D%0A vm = test_stub.create_vm(image_name = image_name)%0D%0A vm.destroy()%0D%0A image.delete()%0D%0A os.system('rm -f %25s' %25 test_image)%0D%0A test_util.test_pass('Test adding image from local stroage pass.')%0D%0A%0D%0A#Will be called only if exception happens in test().%0D%0Adef error_cleanup():%0D%0A test_lib.lib_error_cleanup(test_obj_dict)%0D%0A os.system('rm -f 
%25s' %25 test_image)%0D%0A
|
|
bfdcebfb287b6c3495e74888ace0409f47b530c9
|
add testGroup script
|
ros_ws/src/crazyswarm/scripts/testGroup.py
|
ros_ws/src/crazyswarm/scripts/testGroup.py
|
Python
| 0.000001 |
@@ -0,0 +1,453 @@
+#!/usr/bin/env python%0A%0Aimport numpy as np%0Afrom pycrazyswarm import *%0A%0AZ = 1.5%0A%0Aif __name__ == %22__main__%22:%0A swarm = Crazyswarm()%0A timeHelper = swarm.timeHelper%0A allcfs = swarm.allcfs%0A%0A allcfs.crazyfliesById%5B9%5D.setGroup(1)%0A allcfs.crazyfliesById%5B10%5D.setGroup(2)%0A%0A allcfs.takeoff(targetHeight=Z, duration=1.0 + Z, group = 1)%0A timeHelper.sleep(1.5 + Z)%0A allcfs.land(targetHeight=0.06, duration=1.0 + Z)%0A timeHelper.sleep(1.5 + Z)%0A
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.