column        type           min    max
------        ----           ---    ---
commit        stringlengths  40     40
old_file      stringlengths  5      117
new_file      stringlengths  5      117
old_contents  stringlengths  0      1.93k
new_contents  stringlengths  19     3.3k
subject       stringlengths  17     320
message       stringlengths  18     3.28k
lang          stringclasses  1 value
license       stringclasses  13 values
repos         stringlengths  7      42.4k
completion    stringlengths  152    6.66k
prompt        stringlengths  21     3.65k
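Each record below is one commit edit, flattened field by field in the column order above: commit SHA, old/new file path, old and new file contents, commit subject and message, language, license, the repositories carrying the commit, the edit-markup `completion`, and the `prompt`. A minimal sketch of loading and inspecting such a dataset with the `datasets` library — the repository id is a placeholder, since this card does not name one:

```python
from datasets import load_dataset

# "user/commit-edits" is a hypothetical repo id; replace with the real one.
ds = load_dataset("user/commit-edits", split="train")

row = ds[0]
print(row["commit"])                           # 40-character commit SHA
print(row["old_file"], "->", row["new_file"])  # file path before/after the commit
print(row["subject"])                          # first line of the commit message
print(row["completion"][:80])                  # edit directives + full new file
```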
d1afc8b673595accb854639b311bbdd49be56022
server/LikeLines/debug.py
server/LikeLines/debug.py
""" Debug Blueprints. """ from flask import Blueprint, current_app, redirect, jsonify, url_for debug_pages = Blueprint('debug', __name__) @debug_pages.route("/clear_all") def clear_all(): mongo = current_app.mongo mongo.db.userSessions.remove() mongo.db.interactionSessions.remove() return redirect(url_for('destroy_session')) @debug_pages.route("/dump") def dump_session(): mongo = current_app.mongo return jsonify({ 'userSessions': list(mongo.db.userSessions.find()), 'interactionSessions': list(mongo.db.interactionSessions.find()), })
""" Debug Blueprints. """ from flask import Blueprint, current_app, redirect, jsonify, url_for, request debug_pages = Blueprint('debug', __name__) @debug_pages.route("/clear_all", methods=['GET', 'POST']) def clear_all(): if request.method == 'GET': return '<form method="POST"><input type="submit" value="CLEAR DATABASE"></form>' else: mongo = current_app.mongo mongo.db.userSessions.remove() mongo.db.interactionSessions.remove() return redirect(url_for('destroy_session')) @debug_pages.route("/dump") def dump_session(): mongo = current_app.mongo return jsonify({ 'userSessions': list(mongo.db.userSessions.find()), 'interactionSessions': list(mongo.db.interactionSessions.find()), })
Make it harder to accidentally empty the database
Make it harder to accidentally empty the database
Python
mit
ShinNoNoir/likelines-player,ShinNoNoir/likelines-player,ShinNoNoir/likelines-player
<REPLACE_OLD> url_for debug_pages <REPLACE_NEW> url_for, request debug_pages <REPLACE_END> <REPLACE_OLD> __name__) @debug_pages.route("/clear_all") def <REPLACE_NEW> __name__) @debug_pages.route("/clear_all", methods=['GET', 'POST']) def <REPLACE_END> <INSERT> if request.method == 'GET': return '<form method="POST"><input type="submit" value="CLEAR DATABASE"></form>' else: <INSERT_END> <INSERT> <INSERT_END> <INSERT> <INSERT_END> <INSERT> <INSERT_END> <|endoftext|> """ Debug Blueprints. """ from flask import Blueprint, current_app, redirect, jsonify, url_for, request debug_pages = Blueprint('debug', __name__) @debug_pages.route("/clear_all", methods=['GET', 'POST']) def clear_all(): if request.method == 'GET': return '<form method="POST"><input type="submit" value="CLEAR DATABASE"></form>' else: mongo = current_app.mongo mongo.db.userSessions.remove() mongo.db.interactionSessions.remove() return redirect(url_for('destroy_session')) @debug_pages.route("/dump") def dump_session(): mongo = current_app.mongo return jsonify({ 'userSessions': list(mongo.db.userSessions.find()), 'interactionSessions': list(mongo.db.interactionSessions.find()), })
Make it harder to accidentally empty the database """ Debug Blueprints. """ from flask import Blueprint, current_app, redirect, jsonify, url_for debug_pages = Blueprint('debug', __name__) @debug_pages.route("/clear_all") def clear_all(): mongo = current_app.mongo mongo.db.userSessions.remove() mongo.db.interactionSessions.remove() return redirect(url_for('destroy_session')) @debug_pages.route("/dump") def dump_session(): mongo = current_app.mongo return jsonify({ 'userSessions': list(mongo.db.userSessions.find()), 'interactionSessions': list(mongo.db.interactionSessions.find()), })
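The `completion` column encodes the edit as a sequence of directives — `<REPLACE_OLD> … <REPLACE_NEW> … <REPLACE_END>` pairs plus `<INSERT> … <INSERT_END>` and `<DELETE> … <DELETE_END>` spans — followed by an `<|endoftext|>` sentinel and the complete post-edit file, as in the record above. The simplest consumer skips the directives and takes the embedded final file; a sketch, with a helper name of our own rather than anything shipped with the dataset:

```python
def new_file_from_completion(completion: str) -> str:
    """Return the full post-edit file that follows the <|endoftext|> sentinel."""
    return completion.split("<|endoftext|>", 1)[1].strip()

# Toy completion in the same shape as the records here:
demo = "<REPLACE_OLD> a = 1 <REPLACE_NEW> a = 2 <REPLACE_END> <|endoftext|> a = 2"
print(new_file_from_completion(demo))  # -> a = 2
```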
eb0714767cf5c0fd89ff4e50e22445a5e436f94c
iopath/tabular/tabular_io.py
iopath/tabular/tabular_io.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from typing import Any, Iterable from iopath.common.file_io import PathHandler class TabularUriParser: def parse_uri(self, uri: str) -> None: pass class TabularPathHandler(PathHandler): def _opent( self, path: str, mode: str = "r", buffering: int = 32, **kwargs: Any ) -> Iterable[Any]: assert mode == "r"
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from typing import Any from iopath.common.file_io import PathHandler, TabularIO class TabularUriParser: def parse_uri(self, uri: str) -> None: pass class TabularPathHandler(PathHandler): def _opent( self, path: str, mode: str = "r", buffering: int = 32, **kwargs: Any ) -> TabularIO: assert mode == "r"
Update type signature of AIRStorePathHandler.opent()
Update type signature of AIRStorePathHandler.opent() Summary: The previous diff updated the type signature of the `PathHandler.opent()` method to return a custom context manager. Here, we update the return type of the overriden `AIRStorePathHandler.opent()` method to return an implementation of the `PathHandlerContext` protocol, namely the `AIRStoreRowDataLoader` instead of `Iterable[Any]` to allow Pyre to carry out static type checking. Reviewed By: mackorone Differential Revision: D33833561 fbshipit-source-id: f642110645b147a955f4375fc24d4c29cdca6f26
Python
mit
facebookresearch/iopath,facebookresearch/iopath
<REPLACE_OLD> Any, Iterable from <REPLACE_NEW> Any from <REPLACE_END> <REPLACE_OLD> PathHandler class <REPLACE_NEW> PathHandler, TabularIO class <REPLACE_END> <REPLACE_OLD> Iterable[Any]: <REPLACE_NEW> TabularIO: <REPLACE_END> <|endoftext|> # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from typing import Any from iopath.common.file_io import PathHandler, TabularIO class TabularUriParser: def parse_uri(self, uri: str) -> None: pass class TabularPathHandler(PathHandler): def _opent( self, path: str, mode: str = "r", buffering: int = 32, **kwargs: Any ) -> TabularIO: assert mode == "r"
Update type signature of AIRStorePathHandler.opent() Summary: The previous diff updated the type signature of the `PathHandler.opent()` method to return a custom context manager. Here, we update the return type of the overriden `AIRStorePathHandler.opent()` method to return an implementation of the `PathHandlerContext` protocol, namely the `AIRStoreRowDataLoader` instead of `Iterable[Any]` to allow Pyre to carry out static type checking. Reviewed By: mackorone Differential Revision: D33833561 fbshipit-source-id: f642110645b147a955f4375fc24d4c29cdca6f26 # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved. from typing import Any, Iterable from iopath.common.file_io import PathHandler class TabularUriParser: def parse_uri(self, uri: str) -> None: pass class TabularPathHandler(PathHandler): def _opent( self, path: str, mode: str = "r", buffering: int = 32, **kwargs: Any ) -> Iterable[Any]: assert mode == "r"
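Comparing records, the `prompt` column appears to be the commit message followed by `old_contents`; rows that add a brand-new file (empty `old_contents`) carry only the message. A hedged sketch of that pairing — the exact separator between the two parts is an assumption:

```python
def build_prompt(message: str, old_contents: str) -> str:
    # Assumed layout: commit message, then the pre-edit file.
    # The separator is a guess; inspect real rows before relying on it.
    return f"{message}\n{old_contents}" if old_contents else message
```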
98550946e8bc0da9a1ecdec8f0e53490f8fd5e91
conftest.py
conftest.py
import shutil import pytest try: import six except ImportError: from django.utils import six from django.conf import settings def teardown_assets_directory(): # Removing the temporary TEMP_DIR. Ensure we pass in unicode # so that it will successfully remove temp trees containing # non-ASCII filenames on Windows. (We're assuming the temp dir # name itself does not contain non-ASCII characters.) shutil.rmtree(six.text_type(settings.TEMP_DIR)) @pytest.fixture(scope="session", autouse=True) def assets_directory(request): request.addfinalizer(teardown_assets_directory)
import shutil import pytest try: import six except ImportError: from django.utils import six from django.conf import settings def teardown_assets_directory(): # Removing the temporary TEMP_DIR. Ensure we pass in unicode # so that it will successfully remove temp trees containing # non-ASCII filenames on Windows. (We're assuming the temp dir # name itself does not contain non-ASCII characters.) shutil.rmtree(six.text_type(settings.TEMP_DIR)) @pytest.fixture(scope="session", autouse=True) def assets_directory(request): request.addfinalizer(teardown_assets_directory) def get_collect_ignore(): mapping = { 'widgy.contrib.widgy_mezzanine': ['widgy/contrib/widgy_mezzanine/'], 'widgy.contrib.form_builder': ['widgy/contrib/form_builder/'], 'widgy.contrib.page_builder': ['widgy/contrib/page_builder/'], 'widgy.contrib.urlconf_include': ['widgy/contrib/urlconf_include/'], 'widgy.contrib.widgy_i18n': ['widgy/contrib/urlconf_include/'], } acc = [] for app, path_list in mapping.items(): if app not in settings.INSTALLED_APPS: acc.extend(path_list) return acc collect_ignore = get_collect_ignore()
Make pytest autodiscover tests depending on the INSTALLED_APPS
Make pytest autodiscover tests depending on the INSTALLED_APPS
Python
apache-2.0
j00bar/django-widgy,j00bar/django-widgy,j00bar/django-widgy
<REPLACE_OLD> request.addfinalizer(teardown_assets_directory) <REPLACE_NEW> request.addfinalizer(teardown_assets_directory) def get_collect_ignore(): mapping = { 'widgy.contrib.widgy_mezzanine': ['widgy/contrib/widgy_mezzanine/'], 'widgy.contrib.form_builder': ['widgy/contrib/form_builder/'], 'widgy.contrib.page_builder': ['widgy/contrib/page_builder/'], 'widgy.contrib.urlconf_include': ['widgy/contrib/urlconf_include/'], 'widgy.contrib.widgy_i18n': ['widgy/contrib/urlconf_include/'], } acc = [] for app, path_list in mapping.items(): if app not in settings.INSTALLED_APPS: acc.extend(path_list) return acc collect_ignore = get_collect_ignore() <REPLACE_END> <|endoftext|> import shutil import pytest try: import six except ImportError: from django.utils import six from django.conf import settings def teardown_assets_directory(): # Removing the temporary TEMP_DIR. Ensure we pass in unicode # so that it will successfully remove temp trees containing # non-ASCII filenames on Windows. (We're assuming the temp dir # name itself does not contain non-ASCII characters.) shutil.rmtree(six.text_type(settings.TEMP_DIR)) @pytest.fixture(scope="session", autouse=True) def assets_directory(request): request.addfinalizer(teardown_assets_directory) def get_collect_ignore(): mapping = { 'widgy.contrib.widgy_mezzanine': ['widgy/contrib/widgy_mezzanine/'], 'widgy.contrib.form_builder': ['widgy/contrib/form_builder/'], 'widgy.contrib.page_builder': ['widgy/contrib/page_builder/'], 'widgy.contrib.urlconf_include': ['widgy/contrib/urlconf_include/'], 'widgy.contrib.widgy_i18n': ['widgy/contrib/urlconf_include/'], } acc = [] for app, path_list in mapping.items(): if app not in settings.INSTALLED_APPS: acc.extend(path_list) return acc collect_ignore = get_collect_ignore()
Make pytest autodiscover tests depending on the INSTALLED_APPS import shutil import pytest try: import six except ImportError: from django.utils import six from django.conf import settings def teardown_assets_directory(): # Removing the temporary TEMP_DIR. Ensure we pass in unicode # so that it will successfully remove temp trees containing # non-ASCII filenames on Windows. (We're assuming the temp dir # name itself does not contain non-ASCII characters.) shutil.rmtree(six.text_type(settings.TEMP_DIR)) @pytest.fixture(scope="session", autouse=True) def assets_directory(request): request.addfinalizer(teardown_assets_directory)
459546a9cedb8e9cf3bee67edb4a76d37874f03b
tests/test_athletics.py
tests/test_athletics.py
from nose.tools import ok_, eq_ from pennathletics.athletes import get_roster, get_player class TestAthletics(): def test_roster(self): ok_(get_roster("m-baskbl", 2015) != []) def test_player_empty(self): ok_(get_player("m-baskbl", 2014) != []) def test_player_number(self): eq_(get_player("m-baskbl", 2013, jersey=1)[0].height, "6'2\"")
from nose.tools import ok_, eq_ from pennathletics.athletes import get_roster, get_player class TestAthletics(): def test_roster(self): ok_(get_roster("m-baskbl", 2015) != []) def test_player_empty(self): ok_(get_player("m-baskbl", 2014) != []) def test_player_number(self): eq_(get_player("m-baskbl", 2013, jersey=1)[0].height, "6'2\"") def test_player_hometown(self): player = get_player("m-baskbl", 2012, homeTown="Belfast, Ireland")[0] eq_(player.weight, '210 lbs') def test_player_softball(self): # 19 players on the 2013 softball team eq_(len(get_roster("w-softbl", 2013)), 19)
Add a few more tests for variety
Add a few more tests for variety
Python
mit
pennlabs/pennathletics
<REPLACE_OLD> "6'2\"") <REPLACE_NEW> "6'2\"") def test_player_hometown(self): player = get_player("m-baskbl", 2012, homeTown="Belfast, Ireland")[0] eq_(player.weight, '210 lbs') def test_player_softball(self): # 19 players on the 2013 softball team eq_(len(get_roster("w-softbl", 2013)), 19) <REPLACE_END> <|endoftext|> from nose.tools import ok_, eq_ from pennathletics.athletes import get_roster, get_player class TestAthletics(): def test_roster(self): ok_(get_roster("m-baskbl", 2015) != []) def test_player_empty(self): ok_(get_player("m-baskbl", 2014) != []) def test_player_number(self): eq_(get_player("m-baskbl", 2013, jersey=1)[0].height, "6'2\"") def test_player_hometown(self): player = get_player("m-baskbl", 2012, homeTown="Belfast, Ireland")[0] eq_(player.weight, '210 lbs') def test_player_softball(self): # 19 players on the 2013 softball team eq_(len(get_roster("w-softbl", 2013)), 19)
Add a few more tests for variety from nose.tools import ok_, eq_ from pennathletics.athletes import get_roster, get_player class TestAthletics(): def test_roster(self): ok_(get_roster("m-baskbl", 2015) != []) def test_player_empty(self): ok_(get_player("m-baskbl", 2014) != []) def test_player_number(self): eq_(get_player("m-baskbl", 2013, jersey=1)[0].height, "6'2\"")
eee3d08ae24174ae0c44853a688e30d3adaf13c5
cryptex/pl_calculator.py
cryptex/pl_calculator.py
from decimal import Decimal from cryptex.transaction import Deposit, Withdrawal from cryptex.trade import Buy, Sell from functools import partial class PLCalculator(object): def __init__(self, exchange): self.exchange = exchange @staticmethod def convert_transaction(market, tx): """ Convert to buy orders or sell trades of 0 cost and 0 price """ base, counter = market if isinstance(tx, Deposit): trade_cls = Buy else: trade_cls = Sell return trade_cls(None, base, counter, tx.datetime, None, tx.amount, Decimal('0')) def _get_trades(self, market): """ Returns all trades in a particular market along with transaction of the base currency, sorted by time. """ base, counter = market trades = [t for t in self.exchange.get_my_trades() if t.base_currency == base and t.counter_currency == counter] txs = [t for t in self.exchange.get_my_transactions() if t.currency == base] tx_trades = map(partial(PLCalculator.convert_transaction, market), txs) all_trades = sorted(trades + tx_trades, key=lambda x: x.datetime) return all_trades def unrealized_pl(self, market): base, counter = market trades = self._get_trades(market) def merge_trades(acc, trade): if isinstance(trade, Buy): new_trade = Buy(None, base, counter, trade.datetime, None, trade.amount, trade.price) acc.append(new_trade) else: oldest_buy = None total_amount = Decimal('0') while total_amount < trade.amount: oldest_buy = acc.pop() total_amount += oldest_buy.amount buy_amount = trade.amount - total_amount if buy_amount != Decimal('0'): new_trade = Buy(None, base, counter, oldest_buy.datetime, None, buy_amount, oldest_buy.price) acc.append(new_trade) return acc return reduce(merge_trades, trades, [])
Add PL Calculator for unrealized profit and loss calculation
Add PL Calculator for unrealized profit and loss calculation
Python
mit
coink/cryptex
<REPLACE_OLD> <REPLACE_NEW> from decimal import Decimal from cryptex.transaction import Deposit, Withdrawal from cryptex.trade import Buy, Sell from functools import partial class PLCalculator(object): def __init__(self, exchange): self.exchange = exchange @staticmethod def convert_transaction(market, tx): """ Convert to buy orders or sell trades of 0 cost and 0 price """ base, counter = market if isinstance(tx, Deposit): trade_cls = Buy else: trade_cls = Sell return trade_cls(None, base, counter, tx.datetime, None, tx.amount, Decimal('0')) def _get_trades(self, market): """ Returns all trades in a particular market along with transaction of the base currency, sorted by time. """ base, counter = market trades = [t for t in self.exchange.get_my_trades() if t.base_currency == base and t.counter_currency == counter] txs = [t for t in self.exchange.get_my_transactions() if t.currency == base] tx_trades = map(partial(PLCalculator.convert_transaction, market), txs) all_trades = sorted(trades + tx_trades, key=lambda x: x.datetime) return all_trades def unrealized_pl(self, market): base, counter = market trades = self._get_trades(market) def merge_trades(acc, trade): if isinstance(trade, Buy): new_trade = Buy(None, base, counter, trade.datetime, None, trade.amount, trade.price) acc.append(new_trade) else: oldest_buy = None total_amount = Decimal('0') while total_amount < trade.amount: oldest_buy = acc.pop() total_amount += oldest_buy.amount buy_amount = trade.amount - total_amount if buy_amount != Decimal('0'): new_trade = Buy(None, base, counter, oldest_buy.datetime, None, buy_amount, oldest_buy.price) acc.append(new_trade) return acc return reduce(merge_trades, trades, []) <REPLACE_END> <|endoftext|> from decimal import Decimal from cryptex.transaction import Deposit, Withdrawal from cryptex.trade import Buy, Sell from functools import partial class PLCalculator(object): def __init__(self, exchange): self.exchange = exchange @staticmethod def convert_transaction(market, tx): """ Convert to buy orders or sell trades of 0 cost and 0 price """ base, counter = market if isinstance(tx, Deposit): trade_cls = Buy else: trade_cls = Sell return trade_cls(None, base, counter, tx.datetime, None, tx.amount, Decimal('0')) def _get_trades(self, market): """ Returns all trades in a particular market along with transaction of the base currency, sorted by time. """ base, counter = market trades = [t for t in self.exchange.get_my_trades() if t.base_currency == base and t.counter_currency == counter] txs = [t for t in self.exchange.get_my_transactions() if t.currency == base] tx_trades = map(partial(PLCalculator.convert_transaction, market), txs) all_trades = sorted(trades + tx_trades, key=lambda x: x.datetime) return all_trades def unrealized_pl(self, market): base, counter = market trades = self._get_trades(market) def merge_trades(acc, trade): if isinstance(trade, Buy): new_trade = Buy(None, base, counter, trade.datetime, None, trade.amount, trade.price) acc.append(new_trade) else: oldest_buy = None total_amount = Decimal('0') while total_amount < trade.amount: oldest_buy = acc.pop() total_amount += oldest_buy.amount buy_amount = trade.amount - total_amount if buy_amount != Decimal('0'): new_trade = Buy(None, base, counter, oldest_buy.datetime, None, buy_amount, oldest_buy.price) acc.append(new_trade) return acc return reduce(merge_trades, trades, [])
Add PL Calculator for unrealized profit and loss calculation
623c56c14aa1d1c47b081f607701323d00903dc9
gather/topic/api.py
gather/topic/api.py
# -*- coding:utf-8 -*- from gather.api import need_auth, EXCLUDE_COLUMNS from gather.extensions import api_manager from gather.topic.models import Topic, Reply bp = api_manager.create_api_blueprint( Topic, methods=["GET", "POST"], preprocessors={ 'POST': [need_auth], }, include_methods=["have_read"], exclude_columns=EXCLUDE_COLUMNS ) def _update_topic_updated(result=None, **kw): if not result: return reply = Reply.query.get(result["id"]) reply.topic.updated = reply.created reply.topic.clear_read() reply.topic.save() reply_bp = api_manager.create_api_blueprint( Reply, methods=["POST"], preprocessors={ 'POST': [need_auth], }, postprocessors={ 'POST': [_update_topic_updated] }, exclude_columns=EXCLUDE_COLUMNS )
# -*- coding:utf-8 -*- from flask import g, jsonify from gather.api import need_auth, EXCLUDE_COLUMNS from gather.extensions import api_manager from gather.topic.models import Topic, Reply bp = api_manager.create_api_blueprint( Topic, methods=["GET", "POST"], preprocessors={ 'POST': [need_auth], }, include_methods=["have_read"], exclude_columns=EXCLUDE_COLUMNS ) @bp.route("/topic/<int:topic_id>/mark_read") def _mark_read_for_topic(topic_id): need_auth() topic = Topic.query.get_or_404(topic_id) topic.mark_read(g.token_user) return jsonify({"code": 200}) def _update_topic_updated(result=None, **kw): if not result: return reply = Reply.query.get(result["id"]) reply.topic.updated = reply.created reply.topic.clear_read() reply.topic.save() reply_bp = api_manager.create_api_blueprint( Reply, methods=["POST"], preprocessors={ 'POST': [need_auth], }, postprocessors={ 'POST': [_update_topic_updated] }, exclude_columns=EXCLUDE_COLUMNS )
Add API to mark topic as reader
Add API to mark topic as reader
Python
mit
whtsky/Gather,whtsky/Gather
<REPLACE_OLD> -*- from <REPLACE_NEW> -*- from flask import g, jsonify from <REPLACE_END> <REPLACE_OLD> exclude_columns=EXCLUDE_COLUMNS ) def <REPLACE_NEW> exclude_columns=EXCLUDE_COLUMNS ) @bp.route("/topic/<int:topic_id>/mark_read") def _mark_read_for_topic(topic_id): need_auth() topic = Topic.query.get_or_404(topic_id) topic.mark_read(g.token_user) return jsonify({"code": 200}) def <REPLACE_END> <|endoftext|> # -*- coding:utf-8 -*- from flask import g, jsonify from gather.api import need_auth, EXCLUDE_COLUMNS from gather.extensions import api_manager from gather.topic.models import Topic, Reply bp = api_manager.create_api_blueprint( Topic, methods=["GET", "POST"], preprocessors={ 'POST': [need_auth], }, include_methods=["have_read"], exclude_columns=EXCLUDE_COLUMNS ) @bp.route("/topic/<int:topic_id>/mark_read") def _mark_read_for_topic(topic_id): need_auth() topic = Topic.query.get_or_404(topic_id) topic.mark_read(g.token_user) return jsonify({"code": 200}) def _update_topic_updated(result=None, **kw): if not result: return reply = Reply.query.get(result["id"]) reply.topic.updated = reply.created reply.topic.clear_read() reply.topic.save() reply_bp = api_manager.create_api_blueprint( Reply, methods=["POST"], preprocessors={ 'POST': [need_auth], }, postprocessors={ 'POST': [_update_topic_updated] }, exclude_columns=EXCLUDE_COLUMNS )
Add API to mark topic as reader # -*- coding:utf-8 -*- from gather.api import need_auth, EXCLUDE_COLUMNS from gather.extensions import api_manager from gather.topic.models import Topic, Reply bp = api_manager.create_api_blueprint( Topic, methods=["GET", "POST"], preprocessors={ 'POST': [need_auth], }, include_methods=["have_read"], exclude_columns=EXCLUDE_COLUMNS ) def _update_topic_updated(result=None, **kw): if not result: return reply = Reply.query.get(result["id"]) reply.topic.updated = reply.created reply.topic.clear_read() reply.topic.save() reply_bp = api_manager.create_api_blueprint( Reply, methods=["POST"], preprocessors={ 'POST': [need_auth], }, postprocessors={ 'POST': [_update_topic_updated] }, exclude_columns=EXCLUDE_COLUMNS )
d7945f0394038e9c194a2e41e6da151b679128a3
cs251tk/toolkit/process_student.py
cs251tk/toolkit/process_student.py
from cs251tk.student import remove from cs251tk.student import clone_student from cs251tk.student import stash from cs251tk.student import pull from cs251tk.student import checkout_date from cs251tk.student import record from cs251tk.student import reset from cs251tk.student import analyze def process_student( student, *, assignments, basedir, clean, date, debug, interact, no_check, no_update, specs, stogit_url ): if clean: remove(student) clone_student(student, baseurl=stogit_url) try: stash(student, no_update=no_update) pull(student, no_update=no_update) checkout_date(student, date=date) recordings = record(student, specs=specs, to_record=assignments, basedir=basedir, debug=debug, interact=interact) analysis = analyze(student, specs, check_for_branches=not no_check) if date: reset(student) return analysis, recordings except Exception as err: if debug: raise err return {'username': student, 'error': err}, []
from cs251tk.student import remove from cs251tk.student import clone_student from cs251tk.student import stash from cs251tk.student import pull from cs251tk.student import checkout_date from cs251tk.student import record from cs251tk.student import reset from cs251tk.student import analyze def process_student( student, *, assignments, basedir, clean, date, debug, interact, no_check, no_update, specs, stogit_url ): if clean: remove(student) clone_student(student, baseurl=stogit_url) try: stash(student, no_update=no_update) pull(student, no_update=no_update) checkout_date(student, date=date) recordings = record(student, specs=specs, to_record=assignments, basedir=basedir, debug=debug, interact=interact) analysis = analyze(student, specs, check_for_branches=not no_check) if date: reset(student) return analysis, recordings except Exception as err: if debug: raise err return {'username': student, 'error': err}, []
Remove extra newlines added during editing
Remove extra newlines added during editing
Python
mit
StoDevX/cs251-toolkit,StoDevX/cs251-toolkit,StoDevX/cs251-toolkit,StoDevX/cs251-toolkit
<INSERT> <INSERT_END> <INSERT> no_update=no_update) pull(student, <INSERT_END> <DELETE> pull(student, no_update=no_update) <DELETE_END> <REPLACE_OLD> interact=interact) <REPLACE_NEW> interact=interact) <REPLACE_END> <|endoftext|> from cs251tk.student import remove from cs251tk.student import clone_student from cs251tk.student import stash from cs251tk.student import pull from cs251tk.student import checkout_date from cs251tk.student import record from cs251tk.student import reset from cs251tk.student import analyze def process_student( student, *, assignments, basedir, clean, date, debug, interact, no_check, no_update, specs, stogit_url ): if clean: remove(student) clone_student(student, baseurl=stogit_url) try: stash(student, no_update=no_update) pull(student, no_update=no_update) checkout_date(student, date=date) recordings = record(student, specs=specs, to_record=assignments, basedir=basedir, debug=debug, interact=interact) analysis = analyze(student, specs, check_for_branches=not no_check) if date: reset(student) return analysis, recordings except Exception as err: if debug: raise err return {'username': student, 'error': err}, []
Remove extra newlines added during editing from cs251tk.student import remove from cs251tk.student import clone_student from cs251tk.student import stash from cs251tk.student import pull from cs251tk.student import checkout_date from cs251tk.student import record from cs251tk.student import reset from cs251tk.student import analyze def process_student( student, *, assignments, basedir, clean, date, debug, interact, no_check, no_update, specs, stogit_url ): if clean: remove(student) clone_student(student, baseurl=stogit_url) try: stash(student, no_update=no_update) pull(student, no_update=no_update) checkout_date(student, date=date) recordings = record(student, specs=specs, to_record=assignments, basedir=basedir, debug=debug, interact=interact) analysis = analyze(student, specs, check_for_branches=not no_check) if date: reset(student) return analysis, recordings except Exception as err: if debug: raise err return {'username': student, 'error': err}, []
26e0a0ce2cb8b907ca7ea7ad098c644c2213fa1b
usb/tests/test_api.py
usb/tests/test_api.py
import json from unittest import TestCase from usb import create_application from usb.models import db class APITestCase(TestCase): def setUp(self): self.app = create_application('config/test.py') self.client = self.app.test_client() db.app = self.app db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_redirect_from_index_namespace(self): pass def test_redirect_from_links_namespace(self): pass def test_create_short_link(self): pass def test_update_short_link(self): pass def test_get_list_of_short_links(self): pass def test_get_list_of_short_links_empty_db(self): response = self.client.get('/links') self.assertEqual(response.status_code, 200) data = json.loads(response.data) self.assertEqual(data, {})
import json from unittest import TestCase from usb import create_application from usb.models import db class APITestCase(TestCase): def setUp(self): self.app = create_application('config/test.py') self.client = self.app.test_client() db.app = self.app db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_redirect_from_index_namespace(self): pass def test_redirect_from_links_namespace(self): pass def test_create_short_link(self): pass def test_update_short_link(self): pass def test_get_list_of_short_links(self): pass def test_get_list_of_short_links_empty_db(self): response = self.client.get('/links') self.assertEqual(response.status_code, 200) self.assertEqual(response.headers['Content-Type'], 'application/json') data = json.loads(response.data) self.assertEqual(data, {})
Test content type for JSON API
Test content type for JSON API
Python
mit
dizpers/usb
<INSERT> self.assertEqual(response.headers['Content-Type'], 'application/json') <INSERT_END> <|endoftext|> import json from unittest import TestCase from usb import create_application from usb.models import db class APITestCase(TestCase): def setUp(self): self.app = create_application('config/test.py') self.client = self.app.test_client() db.app = self.app db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_redirect_from_index_namespace(self): pass def test_redirect_from_links_namespace(self): pass def test_create_short_link(self): pass def test_update_short_link(self): pass def test_get_list_of_short_links(self): pass def test_get_list_of_short_links_empty_db(self): response = self.client.get('/links') self.assertEqual(response.status_code, 200) self.assertEqual(response.headers['Content-Type'], 'application/json') data = json.loads(response.data) self.assertEqual(data, {})
Test content type for JSON API import json from unittest import TestCase from usb import create_application from usb.models import db class APITestCase(TestCase): def setUp(self): self.app = create_application('config/test.py') self.client = self.app.test_client() db.app = self.app db.create_all() def tearDown(self): db.session.remove() db.drop_all() def test_redirect_from_index_namespace(self): pass def test_redirect_from_links_namespace(self): pass def test_create_short_link(self): pass def test_update_short_link(self): pass def test_get_list_of_short_links(self): pass def test_get_list_of_short_links_empty_db(self): response = self.client.get('/links') self.assertEqual(response.status_code, 200) data = json.loads(response.data) self.assertEqual(data, {})
e4841c674545892dfc6a8390574cec7c2836e004
main.py
main.py
from SimpleCV import * winsize = (640,480) display = Display(winsize) video = VirtualCamera('stefan_eye.mp4', 'video') while display.isNotDone(): a = video.getImage() a.rotate(90).invert().toGray().binarize().save(display)
from SimpleCV import * winsize = (640,480) display = Display(winsize) video = VirtualCamera('stefan_eye.mp4', 'video') while display.isNotDone(): image = video.getImage().rotate(90).crop(850,50,400,400) image2 = image.colorDistance(Color.RED) blobs = image2.findBlobs() image3 = image2.grayscale() if blobs: for b in blobs: if b.isCircle(0.7) and b.radius() > 3: image.drawCircle((b.x,b.y),b.radius(),Color.YELLOW,2) image.show()
Add code to accomodate a new '3 circles' approach
Add code to accomodate a new '3 circles' approach
Python
mit
ColdSauce/Iris
<REPLACE_OLD> Display(winsize) video <REPLACE_NEW> Display(winsize) video <REPLACE_END> <REPLACE_OLD> a <REPLACE_NEW> image <REPLACE_END> <REPLACE_OLD> video.getImage() <REPLACE_NEW> video.getImage().rotate(90).crop(850,50,400,400) <REPLACE_END> <REPLACE_OLD> a.rotate(90).invert().toGray().binarize().save(display) <REPLACE_NEW> image2 = image.colorDistance(Color.RED) blobs = image2.findBlobs() image3 = image2.grayscale() if blobs: for b in blobs: if b.isCircle(0.7) and b.radius() > 3: image.drawCircle((b.x,b.y),b.radius(),Color.YELLOW,2) image.show() <REPLACE_END> <|endoftext|> from SimpleCV import * winsize = (640,480) display = Display(winsize) video = VirtualCamera('stefan_eye.mp4', 'video') while display.isNotDone(): image = video.getImage().rotate(90).crop(850,50,400,400) image2 = image.colorDistance(Color.RED) blobs = image2.findBlobs() image3 = image2.grayscale() if blobs: for b in blobs: if b.isCircle(0.7) and b.radius() > 3: image.drawCircle((b.x,b.y),b.radius(),Color.YELLOW,2) image.show()
Add code to accomodate a new '3 circles' approach from SimpleCV import * winsize = (640,480) display = Display(winsize) video = VirtualCamera('stefan_eye.mp4', 'video') while display.isNotDone(): a = video.getImage() a.rotate(90).invert().toGray().binarize().save(display)
8f0956313b140d7a0d51510cd9b4a5eec7d54570
plugins/holland.lib.lvm/tests/test_util.py
plugins/holland.lib.lvm/tests/test_util.py
import os import signal from nose.tools import * from holland.lib.lvm.util import * def test_format_bytes(): assert_equals(format_bytes(1024), '1.00KB') assert_equals(format_bytes(0), '0.00Bytes') def test_getmount(): assert_equals(getmount('/'), '/') assert_equals(getmount('/foobarbaz'), '/') def test_getdevice(): # XXX: bad hack dev = open('/etc/mtab', 'r').readline().split()[0].strip() assert_equals(getdevice('/'), dev) assert_equals(getdevice('/foobarbaz'), None) def test_relpath(): assert_raises(ValueError, relpath, '') assert_equals(relpath('/foo/bar/baz', '/foo/bar'), 'baz') assert_equals(relpath('/foo/bar/', '/foo/bar/'), os.curdir) def test_signalmanager(): sigmgr = SignalManager() sigmgr.trap(signal.SIGINT) os.kill(os.getpid(), signal.SIGINT) ok_(sigmgr.pending) assert_equals(sigmgr.pending[0], signal.SIGINT) sigmgr.restore() assert_raises(KeyboardInterrupt, os.kill, os.getpid(), signal.SIGINT)
import os import signal from nose.tools import * from holland.lib.lvm.util import * def test_format_bytes(): assert_equals(format_bytes(1024), '1.00KB') assert_equals(format_bytes(0), '0.00Bytes') def test_getmount(): assert_equals(getmount('/'), '/') assert_equals(getmount('/foobarbaz'), '/') def test_getdevice(): # XXX: bad hack dev = open('/etc/mtab', 'r').readline().split()[0].strip() assert_equals(getdevice('/'), dev) assert_equals(getdevice('/foobarbaz'), None) def test_relpath(): assert_raises(ValueError, relpath, '') assert_equals(relpath('/foo/bar/baz', '/foo/bar'), 'baz') assert_equals(relpath('/foo/bar/', '/foo/bar/'), os.curdir) def test_signalmanager(): sigmgr = SignalManager() sigmgr.trap(signal.SIGINT) os.kill(os.getpid(), signal.SIGINT) ok_(sigmgr.pending) assert_equals(sigmgr.pending[0], signal.SIGINT) sigmgr.restore() assert_raises(KeyboardInterrupt, os.kill, os.getpid(), signal.SIGINT) def test_parsebytes(): # bytes without units should be interpretted as MB bytes = parse_bytes('1024') assert_equals(bytes, 1024**3) # this should not be bytes ok_(bytes > 1024) bytes = parse_bytes('1024G') assert_equals(bytes, 1024**4)
Add test case to holland.lib.lvm for parsing snapshot-size without units
Add test case to holland.lib.lvm for parsing snapshot-size without units
Python
bsd-3-clause
m00dawg/holland,m00dawg/holland
<REPLACE_OLD> signal.SIGINT) <REPLACE_NEW> signal.SIGINT) def test_parsebytes(): # bytes without units should be interpretted as MB bytes = parse_bytes('1024') assert_equals(bytes, 1024**3) # this should not be bytes ok_(bytes > 1024) bytes = parse_bytes('1024G') assert_equals(bytes, 1024**4) <REPLACE_END> <|endoftext|> import os import signal from nose.tools import * from holland.lib.lvm.util import * def test_format_bytes(): assert_equals(format_bytes(1024), '1.00KB') assert_equals(format_bytes(0), '0.00Bytes') def test_getmount(): assert_equals(getmount('/'), '/') assert_equals(getmount('/foobarbaz'), '/') def test_getdevice(): # XXX: bad hack dev = open('/etc/mtab', 'r').readline().split()[0].strip() assert_equals(getdevice('/'), dev) assert_equals(getdevice('/foobarbaz'), None) def test_relpath(): assert_raises(ValueError, relpath, '') assert_equals(relpath('/foo/bar/baz', '/foo/bar'), 'baz') assert_equals(relpath('/foo/bar/', '/foo/bar/'), os.curdir) def test_signalmanager(): sigmgr = SignalManager() sigmgr.trap(signal.SIGINT) os.kill(os.getpid(), signal.SIGINT) ok_(sigmgr.pending) assert_equals(sigmgr.pending[0], signal.SIGINT) sigmgr.restore() assert_raises(KeyboardInterrupt, os.kill, os.getpid(), signal.SIGINT) def test_parsebytes(): # bytes without units should be interpretted as MB bytes = parse_bytes('1024') assert_equals(bytes, 1024**3) # this should not be bytes ok_(bytes > 1024) bytes = parse_bytes('1024G') assert_equals(bytes, 1024**4)
Add test case to holland.lib.lvm for parsing snapshot-size without units import os import signal from nose.tools import * from holland.lib.lvm.util import * def test_format_bytes(): assert_equals(format_bytes(1024), '1.00KB') assert_equals(format_bytes(0), '0.00Bytes') def test_getmount(): assert_equals(getmount('/'), '/') assert_equals(getmount('/foobarbaz'), '/') def test_getdevice(): # XXX: bad hack dev = open('/etc/mtab', 'r').readline().split()[0].strip() assert_equals(getdevice('/'), dev) assert_equals(getdevice('/foobarbaz'), None) def test_relpath(): assert_raises(ValueError, relpath, '') assert_equals(relpath('/foo/bar/baz', '/foo/bar'), 'baz') assert_equals(relpath('/foo/bar/', '/foo/bar/'), os.curdir) def test_signalmanager(): sigmgr = SignalManager() sigmgr.trap(signal.SIGINT) os.kill(os.getpid(), signal.SIGINT) ok_(sigmgr.pending) assert_equals(sigmgr.pending[0], signal.SIGINT) sigmgr.restore() assert_raises(KeyboardInterrupt, os.kill, os.getpid(), signal.SIGINT)
a2400b6980089803b38121e20e2d24ee2f463eb1
keyring/tests/backends/test_chainer.py
keyring/tests/backends/test_chainer.py
import pytest import keyring.backends.chainer from keyring import backend @pytest.fixture def two_keyrings(monkeypatch): def get_two(): class Keyring1(backend.KeyringBackend): priority = 1 def get_password(self, system, user): return 'ring1-{system}-{user}'.format(**locals()) def set_password(self, system, user, password): pass class Keyring2(backend.KeyringBackend): priority = 2 def get_password(self, system, user): return 'ring2-{system}-{user}'.format(**locals()) def set_password(self, system, user, password): raise NotImplementedError() return Keyring1(), Keyring2() monkeypatch.setattr('keyring.backend.get_all_keyring', get_two) class TestChainer: def test_chainer_gets_from_highest_priority(self, two_keyrings): chainer = keyring.backends.chainer.ChainerBackend() pw = chainer.get_password('alpha', 'bravo') assert pw == 'ring2-alpha-bravo'
Add a test for the chainer.
Add a test for the chainer.
Python
mit
jaraco/keyring
<REPLACE_OLD> <REPLACE_NEW> import pytest import keyring.backends.chainer from keyring import backend @pytest.fixture def two_keyrings(monkeypatch): def get_two(): class Keyring1(backend.KeyringBackend): priority = 1 def get_password(self, system, user): return 'ring1-{system}-{user}'.format(**locals()) def set_password(self, system, user, password): pass class Keyring2(backend.KeyringBackend): priority = 2 def get_password(self, system, user): return 'ring2-{system}-{user}'.format(**locals()) def set_password(self, system, user, password): raise NotImplementedError() return Keyring1(), Keyring2() monkeypatch.setattr('keyring.backend.get_all_keyring', get_two) class TestChainer: def test_chainer_gets_from_highest_priority(self, two_keyrings): chainer = keyring.backends.chainer.ChainerBackend() pw = chainer.get_password('alpha', 'bravo') assert pw == 'ring2-alpha-bravo' <REPLACE_END> <|endoftext|> import pytest import keyring.backends.chainer from keyring import backend @pytest.fixture def two_keyrings(monkeypatch): def get_two(): class Keyring1(backend.KeyringBackend): priority = 1 def get_password(self, system, user): return 'ring1-{system}-{user}'.format(**locals()) def set_password(self, system, user, password): pass class Keyring2(backend.KeyringBackend): priority = 2 def get_password(self, system, user): return 'ring2-{system}-{user}'.format(**locals()) def set_password(self, system, user, password): raise NotImplementedError() return Keyring1(), Keyring2() monkeypatch.setattr('keyring.backend.get_all_keyring', get_two) class TestChainer: def test_chainer_gets_from_highest_priority(self, two_keyrings): chainer = keyring.backends.chainer.ChainerBackend() pw = chainer.get_password('alpha', 'bravo') assert pw == 'ring2-alpha-bravo'
Add a test for the chainer.
2e88043e2f7a987469f1af5dffa1c4675368c667
tests/schema-validator.py
tests/schema-validator.py
#!/usr/bin/python import os import sys sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/../third-party/jsonschema") import jsonschema import jsonschema.exceptions def main(argv): if len(argv) < 3: print "Usage: " print "\t" + os.path.basename(__file__) + " <json file> <schema file>" sys.exit(-1) jsonFile = open(argv[1]) jsonContents = jsonFile.read() jsonFile.close() schemaFile = open(argv[2]) jsonSchema = schemaFile.read() schemaFile.close() try: jsonschema.validate(eval(jsonContents), eval(jsonSchema)) print "Provided JSON is valid against the schema." except jsonschema.ValidationError as e: print e if (__name__ == "__main__"): sys.exit(main(sys.argv))
#!/usr/bin/python import os import sys sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/../third-party/jsonschema") import jsonschema import jsonschema.exceptions def main(argv): if len(argv) < 3: print "Usage: " print "\t" + os.path.basename(__file__) + " <json file> <schema file>" sys.exit(-1) jsonFile = open(argv[1]) jsonContents = jsonFile.read() jsonContents = jsonContents.replace('false', 'False') jsonContents = jsonContents.replace('true', 'True') jsonFile.close() schemaFile = open(argv[2]) jsonSchema = schemaFile.read() jsonSchema = jsonSchema.replace('false', 'False') jsonSchema = jsonSchema.replace('true', 'True') schemaFile.close() try: jsonschema.validate(eval(jsonContents), eval(jsonSchema)) print "Provided JSON is valid against the schema." except jsonschema.ValidationError as e: print e if (__name__ == "__main__"): sys.exit(main(sys.argv))
Replace true/false from JSON to python False/True
Replace true/false from JSON to python False/True Signed-off-by: Vivek Galatage <bbe41406aa2af935662c4582fd181c8ca0156a8e@visteon.com>
Python
mit
vivekgalatage/libtracing
<INSERT> jsonContents = jsonContents.replace('false', 'False') jsonContents = jsonContents.replace('true', 'True') <INSERT_END> <INSERT> jsonSchema = jsonSchema.replace('false', 'False') jsonSchema = jsonSchema.replace('true', 'True') <INSERT_END> <|endoftext|> #!/usr/bin/python import os import sys sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/../third-party/jsonschema") import jsonschema import jsonschema.exceptions def main(argv): if len(argv) < 3: print "Usage: " print "\t" + os.path.basename(__file__) + " <json file> <schema file>" sys.exit(-1) jsonFile = open(argv[1]) jsonContents = jsonFile.read() jsonContents = jsonContents.replace('false', 'False') jsonContents = jsonContents.replace('true', 'True') jsonFile.close() schemaFile = open(argv[2]) jsonSchema = schemaFile.read() jsonSchema = jsonSchema.replace('false', 'False') jsonSchema = jsonSchema.replace('true', 'True') schemaFile.close() try: jsonschema.validate(eval(jsonContents), eval(jsonSchema)) print "Provided JSON is valid against the schema." except jsonschema.ValidationError as e: print e if (__name__ == "__main__"): sys.exit(main(sys.argv))
Replace true/false from JSON to python False/True Signed-off-by: Vivek Galatage <bbe41406aa2af935662c4582fd181c8ca0156a8e@visteon.com> #!/usr/bin/python import os import sys sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)) + "/../third-party/jsonschema") import jsonschema import jsonschema.exceptions def main(argv): if len(argv) < 3: print "Usage: " print "\t" + os.path.basename(__file__) + " <json file> <schema file>" sys.exit(-1) jsonFile = open(argv[1]) jsonContents = jsonFile.read() jsonFile.close() schemaFile = open(argv[2]) jsonSchema = schemaFile.read() schemaFile.close() try: jsonschema.validate(eval(jsonContents), eval(jsonSchema)) print "Provided JSON is valid against the schema." except jsonschema.ValidationError as e: print e if (__name__ == "__main__"): sys.exit(main(sys.argv))
94796ca0107e6c676e3905675290bbe147169717
hoppy/deploy.py
hoppy/deploy.py
from restkit import Resource from hoppy import api_key class Deploy(Resource): def __init__(self, use_ssl=False): self.api_key = api_key super(Deploy, self).__init__(self.host, follow_redirect=True) def check_configuration(self): if not self.api_key: raise HoptoadError('API Key cannot be blank') def request(self, *args, **kwargs): response = super(Deploy, self).request( api_key=self.api_key, *args, **kwargs) return response.body_string() def base_uri(self, use_ssl=False): base = 'http://hoptoadapp.com/deploys.txt' base = base.replace('http://', 'https://') if use_ssl else base return base def deploy(self, env, **kwargs): """ Optional parameters accepted by Hoptoad are: scm_revision scm_repository local_username """ params = {} params['deploy[rails_env]'] = env for key, value in kwargs: params['deploy[%s]' % key] = value return self.post(**params)
from hoppy.api import HoptoadResource class Deploy(HoptoadResource): def __init__(self, use_ssl=False): from hoppy import api_key self.api_key = api_key super(Deploy, self).__init__(use_ssl) def check_configuration(self): if not self.api_key: raise HoptoadError('API Key cannot be blank') def request(self, *args, **kwargs): response = super(Deploy, self).request( api_key=self.api_key, *args, **kwargs) return response def base_uri(self, use_ssl=False): base = 'http://hoptoadapp.com/deploys.txt' base = base.replace('http://', 'https://') if use_ssl else base return base def deploy(self, env, **kwargs): """ Optional parameters accepted by Hoptoad are: scm_revision scm_repository local_username """ params = {} params['deploy[rails_env]'] = env for key, value in kwargs.iteritems(): params['deploy[%s]' % key] = value return self.post(**params)
Test Deploy resource after reworking.
Test Deploy resource after reworking.
Python
mit
peplin/hoppy
<REPLACE_OLD> restkit <REPLACE_NEW> hoppy.api <REPLACE_END> <REPLACE_OLD> Resource from <REPLACE_NEW> HoptoadResource class Deploy(HoptoadResource): def __init__(self, use_ssl=False): from <REPLACE_END> <REPLACE_OLD> api_key class Deploy(Resource): def __init__(self, use_ssl=False): <REPLACE_NEW> api_key <REPLACE_END> <REPLACE_OLD> self).__init__(self.host, follow_redirect=True) <REPLACE_NEW> self).__init__(use_ssl) <REPLACE_END> <REPLACE_OLD> response.body_string() <REPLACE_NEW> response <REPLACE_END> <REPLACE_OLD> kwargs: <REPLACE_NEW> kwargs.iteritems(): <REPLACE_END> <|endoftext|> from hoppy.api import HoptoadResource class Deploy(HoptoadResource): def __init__(self, use_ssl=False): from hoppy import api_key self.api_key = api_key super(Deploy, self).__init__(use_ssl) def check_configuration(self): if not self.api_key: raise HoptoadError('API Key cannot be blank') def request(self, *args, **kwargs): response = super(Deploy, self).request( api_key=self.api_key, *args, **kwargs) return response def base_uri(self, use_ssl=False): base = 'http://hoptoadapp.com/deploys.txt' base = base.replace('http://', 'https://') if use_ssl else base return base def deploy(self, env, **kwargs): """ Optional parameters accepted by Hoptoad are: scm_revision scm_repository local_username """ params = {} params['deploy[rails_env]'] = env for key, value in kwargs.iteritems(): params['deploy[%s]' % key] = value return self.post(**params)
Test Deploy resource after reworking. from restkit import Resource from hoppy import api_key class Deploy(Resource): def __init__(self, use_ssl=False): self.api_key = api_key super(Deploy, self).__init__(self.host, follow_redirect=True) def check_configuration(self): if not self.api_key: raise HoptoadError('API Key cannot be blank') def request(self, *args, **kwargs): response = super(Deploy, self).request( api_key=self.api_key, *args, **kwargs) return response.body_string() def base_uri(self, use_ssl=False): base = 'http://hoptoadapp.com/deploys.txt' base = base.replace('http://', 'https://') if use_ssl else base return base def deploy(self, env, **kwargs): """ Optional parameters accepted by Hoptoad are: scm_revision scm_repository local_username """ params = {} params['deploy[rails_env]'] = env for key, value in kwargs: params['deploy[%s]' % key] = value return self.post(**params)
4938e7e66d187a375db24832c0bfdba79687756e
accelerator/migrations/0003_auto_20180410_1618.py
accelerator/migrations/0003_auto_20180410_1618.py
# -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2018-04-10 20:18 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accelerator', '0002_add_mc_permissions'), ] operations = [ migrations.AlterField( model_name='mentorprogramofficehour', name='location', field=models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas', 'MassChallenge Texas'), ('Newton Innovation Center (NIC)', 'Newton Innovation Center (NIC)'), ('PULSE@MassChallenge', 'PULSE@MassChallenge'), ('Remote', 'Remote')], max_length=50), ), ]
Add Migration For Office Hours Locations
Add Migration For Office Hours Locations
Python
mit
masschallenge/django-accelerator,masschallenge/django-accelerator
<INSERT> # -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2018-04-10 20:18 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): <INSERT_END> <INSERT> dependencies = [ ('accelerator', '0002_add_mc_permissions'), ] operations = [ migrations.AlterField( model_name='mentorprogramofficehour', name='location', field=models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas', 'MassChallenge Texas'), ('Newton Innovation Center (NIC)', 'Newton Innovation Center (NIC)'), ('PULSE@MassChallenge', 'PULSE@MassChallenge'), ('Remote', 'Remote')], max_length=50), ), ] <INSERT_END> <|endoftext|> # -*- coding: utf-8 -*- # Generated by Django 1.10.8 on 2018-04-10 20:18 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('accelerator', '0002_add_mc_permissions'), ] operations = [ migrations.AlterField( model_name='mentorprogramofficehour', name='location', field=models.CharField(choices=[('MassChallenge Boston', 'MassChallenge Boston'), ('MassChallenge Israel - Jerusalem', 'MassChallenge Israel - Jerusalem'), ('MassChallenge Israel - Tel Aviv', 'MassChallenge Israel - Tel Aviv'), ('MassChallenge Mexico', 'MassChallenge Mexico'), ('MassChallenge Rhode Island', 'MassChallenge Rhode Island'), ('MassChallenge Switzerland', 'MassChallenge Switzerland'), ('MassChallenge Texas', 'MassChallenge Texas'), ('Newton Innovation Center (NIC)', 'Newton Innovation Center (NIC)'), ('PULSE@MassChallenge', 'PULSE@MassChallenge'), ('Remote', 'Remote')], max_length=50), ), ]
Add Migration For Office Hours Locations
d64a171dfde57106a5abd7d46990c81c6250b965
whitespaceterminator.py
whitespaceterminator.py
# coding: utf8 # Copyright © 2011 Kozea # Licensed under a 3-clause BSD license. """ Strip trailing whitespace before saving. """ from gi.repository import GObject, Gedit class WhiteSpaceTerminator(GObject.Object, Gedit.WindowActivatable): """Strip trailing whitespace before saving.""" window = GObject.property(type=Gedit.Window) def do_activate(self): self.window.connect("tab-added", self.on_tab_added) def on_tab_added(self, window, tab, data=None): tab.get_document().connect("save", self.on_document_save) def on_document_save(self, document, location, encoding, compression, flags, data=None): for i, text in enumerate(document.props.text.rstrip().split("\n")): strip_stop = document.get_iter_at_line(i) strip_stop.forward_to_line_end() strip_start = strip_stop.copy() strip_start.backward_chars(len(text) - len(text.rstrip())) document.delete(strip_start, strip_stop) document.delete(strip_start, document.get_end_iter())
# coding: utf8 # Copyright © 2011 Kozea # Licensed under a 3-clause BSD license. """ Strip trailing whitespace before saving. """ from gi.repository import GObject, Gedit class WhiteSpaceTerminator(GObject.Object, Gedit.WindowActivatable): """Strip trailing whitespace before saving.""" window = GObject.property(type=Gedit.Window) def do_activate(self): self.window.connect("tab-added", self.on_tab_added) for document in self.window.get_documents(): document.connect("save", self.on_document_save) def on_tab_added(self, window, tab, data=None): tab.get_document().connect("save", self.on_document_save) def on_document_save(self, document, location, encoding, compression, flags, data=None): for i, text in enumerate(document.props.text.rstrip().split("\n")): strip_stop = document.get_iter_at_line(i) strip_stop.forward_to_line_end() strip_start = strip_stop.copy() strip_start.backward_chars(len(text) - len(text.rstrip())) document.delete(strip_start, strip_stop) document.delete(strip_start, document.get_end_iter())
Connect on existing tabs when activating the plugin.
Connect on existing tabs when activating the plugin.
Python
bsd-3-clause
Kozea/Gedit-WhiteSpace-Terminator
<REPLACE_OLD> self.on_tab_added) <REPLACE_NEW> self.on_tab_added) for document in self.window.get_documents(): document.connect("save", self.on_document_save) <REPLACE_END> <|endoftext|> # coding: utf8 # Copyright © 2011 Kozea # Licensed under a 3-clause BSD license. """ Strip trailing whitespace before saving. """ from gi.repository import GObject, Gedit class WhiteSpaceTerminator(GObject.Object, Gedit.WindowActivatable): """Strip trailing whitespace before saving.""" window = GObject.property(type=Gedit.Window) def do_activate(self): self.window.connect("tab-added", self.on_tab_added) for document in self.window.get_documents(): document.connect("save", self.on_document_save) def on_tab_added(self, window, tab, data=None): tab.get_document().connect("save", self.on_document_save) def on_document_save(self, document, location, encoding, compression, flags, data=None): for i, text in enumerate(document.props.text.rstrip().split("\n")): strip_stop = document.get_iter_at_line(i) strip_stop.forward_to_line_end() strip_start = strip_stop.copy() strip_start.backward_chars(len(text) - len(text.rstrip())) document.delete(strip_start, strip_stop) document.delete(strip_start, document.get_end_iter())
Connect on existing tabs when activating the plugin. # coding: utf8 # Copyright © 2011 Kozea # Licensed under a 3-clause BSD license. """ Strip trailing whitespace before saving. """ from gi.repository import GObject, Gedit class WhiteSpaceTerminator(GObject.Object, Gedit.WindowActivatable): """Strip trailing whitespace before saving.""" window = GObject.property(type=Gedit.Window) def do_activate(self): self.window.connect("tab-added", self.on_tab_added) def on_tab_added(self, window, tab, data=None): tab.get_document().connect("save", self.on_document_save) def on_document_save(self, document, location, encoding, compression, flags, data=None): for i, text in enumerate(document.props.text.rstrip().split("\n")): strip_stop = document.get_iter_at_line(i) strip_stop.forward_to_line_end() strip_start = strip_stop.copy() strip_start.backward_chars(len(text) - len(text.rstrip())) document.delete(strip_start, strip_stop) document.delete(strip_start, document.get_end_iter())
743ae5270d2ba24da652110a967f15b5fa526e3d
plugins/plugin_nginx_error.py
plugins/plugin_nginx_error.py
#!/usr/bin/env python # -*- coding:utf-8 -*- import re from manager import Plugin class NginxError(Plugin): def __init__(self, **kwargs): self.keywords = ['nginx', 'error'] self.total_line = 0 self.level_dict = {"error": 0, "notice": 0, "info": 0} self.client_dict = {} def process(self, **kwargs): """docstring for process""" self.total_line += 1 self.level_dict[kwargs['level']] += 1 message = kwargs['message'] m = re.match(".*client: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", message) if m: if m.group(1) in self.client_dict: self.client_dict[m.group(1)] += 1 else: self.client_dict[m.group(1)] = 1 def report(self, **kwargs): """docstring for report""" print "== Nginx Error ==" print "Nginx total error line: %d" % self.total_line for level in self.level_dict.keys(): print "%s: %d" % (level, self.level_dict[level]) for client in self.client_dict.keys(): print "%s: %d" % (client, self.client_dict[client])
Add plugin to parse nginx errors
Add plugin to parse nginx errors
Python
apache-2.0
keepzero/fluent-mongo-parser
<REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/env python # -*- coding:utf-8 -*- import re from manager import Plugin class NginxError(Plugin): def __init__(self, **kwargs): self.keywords = ['nginx', 'error'] self.total_line = 0 self.level_dict = {"error": 0, "notice": 0, "info": 0} self.client_dict = {} def process(self, **kwargs): """docstring for process""" self.total_line += 1 self.level_dict[kwargs['level']] += 1 message = kwargs['message'] m = re.match(".*client: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", message) if m: if m.group(1) in self.client_dict: self.client_dict[m.group(1)] += 1 else: self.client_dict[m.group(1)] = 1 def report(self, **kwargs): """docstring for report""" print "== Nginx Error ==" print "Nginx total error line: %d" % self.total_line for level in self.level_dict.keys(): print "%s: %d" % (level, self.level_dict[level]) for client in self.client_dict.keys(): print "%s: %d" % (client, self.client_dict[client]) <REPLACE_END> <|endoftext|> #!/usr/bin/env python # -*- coding:utf-8 -*- import re from manager import Plugin class NginxError(Plugin): def __init__(self, **kwargs): self.keywords = ['nginx', 'error'] self.total_line = 0 self.level_dict = {"error": 0, "notice": 0, "info": 0} self.client_dict = {} def process(self, **kwargs): """docstring for process""" self.total_line += 1 self.level_dict[kwargs['level']] += 1 message = kwargs['message'] m = re.match(".*client: (\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}).*", message) if m: if m.group(1) in self.client_dict: self.client_dict[m.group(1)] += 1 else: self.client_dict[m.group(1)] = 1 def report(self, **kwargs): """docstring for report""" print "== Nginx Error ==" print "Nginx total error line: %d" % self.total_line for level in self.level_dict.keys(): print "%s: %d" % (level, self.level_dict[level]) for client in self.client_dict.keys(): print "%s: %d" % (client, self.client_dict[client])
Add plugin to parse nginx errors
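A small aside on the record above: the plugin recompiles its pattern on every line and relies on Python passing `\d` through a non-raw string. A possible refinement (not part of the commit; the log line below is invented for illustration) is to precompile a raw-string pattern once at import time:

import re

# Illustrative equivalent of the plugin's client-IP extraction, compiled once.
CLIENT_RE = re.compile(r"client: (\d{1,3}(?:\.\d{1,3}){3})")

m = CLIENT_RE.search("2015/01/01 [error] ... client: 10.0.0.1, server: example.org")
if m:
    print(m.group(1))  # -> 10.0.0.1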
579fbe76f8ab86d5f599f9888149fcaed297842c
src/sentry/celery.py
src/sentry/celery.py
from __future__ import absolute_import import celery import os import os.path import sys # Add the project to the python path sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir)) # Configure the application only if it seemingly isn't already configured from django.conf import settings if not settings.configured: from sentry.runner import configure configure() from sentry.utils import metrics class Celery(celery.Celery): def on_configure(self): from raven.contrib.django.models import client from raven.contrib.celery import register_signal, register_logger_signal # register a custom filter to filter out duplicate logs register_logger_signal(client) # hook into the Celery error handler register_signal(client) app = Celery('sentry') OriginalTask = app.Task class SentryTask(OriginalTask): def apply_async(self, args, kwargs): key = 'jobs.delay' instance = self.name with metrics.timer(key, instance=instance): return OriginalTask.apply_async(self, args, kwargs) app.Task = SentryTask # Using a string here means the worker will not have to # pickle the object when using Windows. app.config_from_object(settings) app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) if __name__ == '__main__': app.start()
from __future__ import absolute_import import celery import os import os.path import sys # Add the project to the python path sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir)) # Configure the application only if it seemingly isn't already configured from django.conf import settings if not settings.configured: from sentry.runner import configure configure() from sentry.utils import metrics class Celery(celery.Celery): def on_configure(self): from raven.contrib.django.models import client from raven.contrib.celery import register_signal, register_logger_signal # register a custom filter to filter out duplicate logs register_logger_signal(client) # hook into the Celery error handler register_signal(client) app = Celery('sentry') OriginalTask = app.Task class SentryTask(OriginalTask): def apply_async(self, *args, **kwargs): key = 'jobs.delay' instance = self.name with metrics.timer(key, instance=instance): return OriginalTask.apply_async(self, *args, **kwargs) app.Task = SentryTask # Using a string here means the worker will not have to # pickle the object when using Windows. app.config_from_object(settings) app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) if __name__ == '__main__': app.start()
Use star args to invoke apply_async
Use star args to invoke apply_async
Python
bsd-3-clause
zenefits/sentry,nicholasserra/sentry,fotinakis/sentry,JamesMura/sentry,alexm92/sentry,nicholasserra/sentry,BuildingLink/sentry,gencer/sentry,daevaorn/sentry,beeftornado/sentry,JamesMura/sentry,mvaled/sentry,jean/sentry,JamesMura/sentry,fotinakis/sentry,mvaled/sentry,JamesMura/sentry,beeftornado/sentry,looker/sentry,fotinakis/sentry,jean/sentry,ifduyue/sentry,zenefits/sentry,ifduyue/sentry,daevaorn/sentry,JackDanger/sentry,zenefits/sentry,jean/sentry,gencer/sentry,jean/sentry,BuildingLink/sentry,BuildingLink/sentry,mvaled/sentry,JackDanger/sentry,looker/sentry,daevaorn/sentry,JamesMura/sentry,mitsuhiko/sentry,fotinakis/sentry,daevaorn/sentry,gencer/sentry,mvaled/sentry,mvaled/sentry,gencer/sentry,ifduyue/sentry,nicholasserra/sentry,JackDanger/sentry,BuildingLink/sentry,mitsuhiko/sentry,alexm92/sentry,jean/sentry,BuildingLink/sentry,looker/sentry,mvaled/sentry,beeftornado/sentry,looker/sentry,zenefits/sentry,looker/sentry,alexm92/sentry,ifduyue/sentry,gencer/sentry,ifduyue/sentry,zenefits/sentry
<REPLACE_OLD> args, kwargs): <REPLACE_NEW> *args, **kwargs): <REPLACE_END> <REPLACE_OLD> args, kwargs) app.Task <REPLACE_NEW> *args, **kwargs) app.Task <REPLACE_END> <|endoftext|> from __future__ import absolute_import import celery import os import os.path import sys # Add the project to the python path sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir)) # Configure the application only if it seemingly isn't already configured from django.conf import settings if not settings.configured: from sentry.runner import configure configure() from sentry.utils import metrics class Celery(celery.Celery): def on_configure(self): from raven.contrib.django.models import client from raven.contrib.celery import register_signal, register_logger_signal # register a custom filter to filter out duplicate logs register_logger_signal(client) # hook into the Celery error handler register_signal(client) app = Celery('sentry') OriginalTask = app.Task class SentryTask(OriginalTask): def apply_async(self, *args, **kwargs): key = 'jobs.delay' instance = self.name with metrics.timer(key, instance=instance): return OriginalTask.apply_async(self, *args, **kwargs) app.Task = SentryTask # Using a string here means the worker will not have to # pickle the object when using Windows. app.config_from_object(settings) app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) if __name__ == '__main__': app.start()
Use star args to invoke apply_async from __future__ import absolute_import import celery import os import os.path import sys # Add the project to the python path sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir)) # Configure the application only if it seemingly isn't already configured from django.conf import settings if not settings.configured: from sentry.runner import configure configure() from sentry.utils import metrics class Celery(celery.Celery): def on_configure(self): from raven.contrib.django.models import client from raven.contrib.celery import register_signal, register_logger_signal # register a custom filter to filter out duplicate logs register_logger_signal(client) # hook into the Celery error handler register_signal(client) app = Celery('sentry') OriginalTask = app.Task class SentryTask(OriginalTask): def apply_async(self, args, kwargs): key = 'jobs.delay' instance = self.name with metrics.timer(key, instance=instance): return OriginalTask.apply_async(self, args, kwargs) app.Task = SentryTask # Using a string here means the worker will not have to # pickle the object when using Windows. app.config_from_object(settings) app.autodiscover_tasks(lambda: settings.INSTALLED_APPS) if __name__ == '__main__': app.start()
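The fix in the record above is a general Python point worth spelling out: `apply_async(self, args, kwargs)` declares two ordinary positional parameters that happen to be named args and kwargs, so callers would have to pre-pack a tuple and a dict, while `*args`/`**kwargs` forwards whatever call signature the caller used. A minimal sketch in plain Python, independent of Celery:

class Base(object):
    def apply_async(self, *args, **kwargs):
        return args, kwargs

class Wrapper(Base):
    def apply_async(self, *args, **kwargs):
        # A metrics timer wraps this call in the real code; the point here is
        # that the caller's positional and keyword arguments pass through unchanged.
        return Base.apply_async(self, *args, **kwargs)

print(Wrapper().apply_async(1, 2, countdown=10))  # ((1, 2), {'countdown': 10})
# With `def apply_async(self, args, kwargs)` the same call would raise TypeError.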
9cf6e843eeb865eeaf90e4023bdccd1325e74535
test_rle.py
test_rle.py
import pypolycomp import numpy as np def test_compression(): for cur_type in (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64): compressed = pypolycomp.rle_compress(np.array([1, 1, 1, 2, 3], dtype=cur_type)) assert np.all(compressed == np.array([3, 1, 1, 2, 1, 3], dtype=cur_type))
import pypolycomp import numpy as np def test_compression(): for cur_type in (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64): compressed = pypolycomp.rle_compress(np.array([1, 1, 1, 2, 3], dtype=cur_type)) assert np.all(compressed == np.array([3, 1, 1, 2, 1, 3], dtype=cur_type)) def test_decompression(): for cur_type in (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64): input_values = np.array(np.random.randint(100, size=1000), dtype=cur_type) compressed = pypolycomp.rle_compress(input_values) output_values = pypolycomp.rle_decompress(compressed) assert np.all(input_values == output_values)
Add test for RLE decompression
Add test for RLE decompression
Python
bsd-3-clause
ziotom78/polycomp
<REPLACE_OLD> dtype=cur_type)) <REPLACE_NEW> dtype=cur_type)) def test_decompression(): for cur_type in (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64): input_values = np.array(np.random.randint(100, size=1000), dtype=cur_type) compressed = pypolycomp.rle_compress(input_values) output_values = pypolycomp.rle_decompress(compressed) assert np.all(input_values == output_values) <REPLACE_END> <|endoftext|> import pypolycomp import numpy as np def test_compression(): for cur_type in (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64): compressed = pypolycomp.rle_compress(np.array([1, 1, 1, 2, 3], dtype=cur_type)) assert np.all(compressed == np.array([3, 1, 1, 2, 1, 3], dtype=cur_type)) def test_decompression(): for cur_type in (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64): input_values = np.array(np.random.randint(100, size=1000), dtype=cur_type) compressed = pypolycomp.rle_compress(input_values) output_values = pypolycomp.rle_decompress(compressed) assert np.all(input_values == output_values)
Add test for RLE decompression import pypolycomp import numpy as np def test_compression(): for cur_type in (np.int8, np.int16, np.int32, np.int64, np.uint8, np.uint16, np.uint32, np.uint64): compressed = pypolycomp.rle_compress(np.array([1, 1, 1, 2, 3], dtype=cur_type)) assert np.all(compressed == np.array([3, 1, 1, 2, 1, 3], dtype=cur_type))
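The round-trip test above checks that compression and decompression are inverses on random data. For reference, a toy pure-Python RLE with the same (count, value) pair layout the first test expects; this is an illustration only, not the pypolycomp implementation:

def rle_compress(values):
    out = []
    for v in values:
        if out and out[-1] == v:
            out[-2] += 1          # extend the current run
        else:
            out += [1, v]         # start a new (count, value) pair
    return out

def rle_decompress(pairs):
    out = []
    for count, v in zip(pairs[::2], pairs[1::2]):
        out.extend([v] * count)
    return out

data = [1, 1, 1, 2, 3]
assert rle_compress(data) == [3, 1, 1, 2, 1, 3]
assert rle_decompress(rle_compress(data)) == data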
8528f21397672b5719fcf4edecd8efa3a1eec60a
cellardoor/serializers/json_serializer.py
cellardoor/serializers/json_serializer.py
import re import json from datetime import datetime from . import Serializer class CellarDoorJSONEncoder(json.JSONEncoder): def default(self, obj): try: iterable = iter(obj) except TypeError: pass else: return list(iterable) if isinstance(obj, datetime): return obj.isoformat() return super(CellarDoorJSONEncoder, self).default(obj) def as_date(obj): if '_date' in obj: return datetime(*map(int, re.split('[^\d]', obj['_date'])[:-1])) else: return obj class JSONSerializer(Serializer): mimetype = 'application/json' def serialize(self, obj): return json.dumps(obj, cls=CellarDoorJSONEncoder) def unserialize(self, stream): return json.load(stream, object_hook=as_date) def unserialize_string(self, data): return json.loads(data, object_hook=as_date)
import re import json from datetime import datetime import collections from . import Serializer class CellarDoorJSONEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, collections.Iterable): return list(obj) if isinstance(obj, datetime): return obj.isoformat() return super(CellarDoorJSONEncoder, self).default(obj) def as_date(obj): if '_date' in obj: return datetime(*map(int, re.split('[^\d]', obj['_date'])[:-1])) else: return obj class JSONSerializer(Serializer): mimetype = 'application/json' def serialize(self, obj): return json.dumps(obj, cls=CellarDoorJSONEncoder) def unserialize(self, stream): return json.load(stream, object_hook=as_date) def unserialize_string(self, data): return json.loads(data, object_hook=as_date)
Use more reliable method of detecting iterables
Use more reliable method of detecting iterables
Python
mit
cooper-software/cellardoor
<REPLACE_OLD> datetime from <REPLACE_NEW> datetime import collections from <REPLACE_END> <REPLACE_OLD> obj): try: iterable = iter(obj) except TypeError: pass else: return list(iterable) if <REPLACE_NEW> obj): if isinstance(obj, collections.Iterable): return list(obj) if <REPLACE_END> <|endoftext|> import re import json from datetime import datetime import collections from . import Serializer class CellarDoorJSONEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, collections.Iterable): return list(obj) if isinstance(obj, datetime): return obj.isoformat() return super(CellarDoorJSONEncoder, self).default(obj) def as_date(obj): if '_date' in obj: return datetime(*map(int, re.split('[^\d]', obj['_date'])[:-1])) else: return obj class JSONSerializer(Serializer): mimetype = 'application/json' def serialize(self, obj): return json.dumps(obj, cls=CellarDoorJSONEncoder) def unserialize(self, stream): return json.load(stream, object_hook=as_date) def unserialize_string(self, data): return json.loads(data, object_hook=as_date)
Use more reliable method of detecting iterables import re import json from datetime import datetime from . import Serializer class CellarDoorJSONEncoder(json.JSONEncoder): def default(self, obj): try: iterable = iter(obj) except TypeError: pass else: return list(iterable) if isinstance(obj, datetime): return obj.isoformat() return super(CellarDoorJSONEncoder, self).default(obj) def as_date(obj): if '_date' in obj: return datetime(*map(int, re.split('[^\d]', obj['_date'])[:-1])) else: return obj class JSONSerializer(Serializer): mimetype = 'application/json' def serialize(self, obj): return json.dumps(obj, cls=CellarDoorJSONEncoder) def unserialize(self, stream): return json.load(stream, object_hook=as_date) def unserialize_string(self, data): return json.loads(data, object_hook=as_date)
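One caveat worth noting about the isinstance check adopted above: on Python 3.3+ the ABC lives in collections.abc (the bare collections alias was removed in 3.10), and strings are themselves Iterable, so check order matters when other types are involved. A sketch of the same logic on modern Python:

from collections.abc import Iterable
from datetime import datetime

def default(obj):
    if isinstance(obj, datetime):      # handle datetime before the generic check
        return obj.isoformat()
    if isinstance(obj, Iterable):      # note: str and bytes would also pass this check
        return list(obj)
    raise TypeError("not serializable: %r" % (obj,))

print(default({1, 2}))                   # e.g. [1, 2] (sets become lists)
print(default(datetime(2020, 1, 1)))     # 2020-01-01T00:00:00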
52f8e68835eb67e522dd8f1c7725d460eaa2cab7
RPS/rps-1.py
RPS/rps-1.py
# A simple rock, paper, scissors script submitted as a demo of easy game-making # In Python # Certain parts of this program are functional—that is, written in functions that # work together. Some parts aren't. As we improve the program, you'll find that # This functional way of doing things has some real advantages. import random # We need the random module for the computer to play # This dictionary relates a choice to what it defeats for easy comparison later. beats = { "rock":"scissors", "paper":"rock", "scissors":"paper" } # Now we make an easy-to-use list of choices from the beats choices = list(beats.keys()) # Get the player choice from the input command. # The lower() is used to correct for people typing with capitals. # The strip() removes any trailing space that might mess us up. player_choice = input("Rock, paper, or scissors?").lower().strip() # Finally, time to compare! But we have to account for cheaters/mistakes. if player_choice not in choices: print("You must choose r, p, or s!") else: # Here we make the computer choose, then compare the two. computer_choice = random.choice(choices) if beats[player_choice] == computer_choice: print("You win!") else: print("You lose") """ This program works, but it's quite limited. Probably its biggest limitation is that it will only run once. It also doesn't keep score—why would it, since it only runs once? Our next version of the game will make it more user-friendly. """
""" A simple rock, paper, scissors script submitted as a demo of easy game-making In Python. """ import random # We need thr random module for the computer to play # This dictionary relates a choice to what it defeats for easy comparison later. beats = { "rock":"scissors", "paper":"rock", "scissors":"paper" } # Now we make an easy-to-use list of choices from the beats choices = list(beats.keys()) # Get the player choice from the input command. # The lower() is used to correct for people typing with capitals. # The strip() removes any trailing space that might mess us up. player_choice = input("Rock, paper, or scissors?").lower().strip() # Finally, time to compare! But we have to account for cheaters/mistakes. if player_choice not in choices: print("You must choose r, p, or s!") else: # Here we make the computer choose, then compare the two. computer_choice = random.choice(choices) if beats[player_choice] == computer_choice print("You win!") else: print("You lose") """ This program works, but it's quite limited. Probably its biggest limitation is that it will only run once. It also doesn't keep score—why would it, since it only runs once? Our next version of the game will make it more user-friendly. """
Refactor out functions from rps1
Refactor out functions from rps1
Python
mit
mttaggart/python-cs
<REPLACE_OLD> # A <REPLACE_NEW> """ A <REPLACE_END> <REPLACE_OLD> game-making # In Python # Certain parts of this program are functional—that is, written in functions that # work together. Some parts aren't. As we improve the program, you'll find that # This functional way of doing things has some real advantages. import <REPLACE_NEW> game-making In Python. """ import <REPLACE_END> <|endoftext|> """ A simple rock, paper, scissors script submitted as a demo of easy game-making In Python. """ import random # We need the random module for the computer to play # This dictionary relates a choice to what it defeats for easy comparison later. beats = { "rock":"scissors", "paper":"rock", "scissors":"paper" } # Now we make an easy-to-use list of choices from the beats choices = list(beats.keys()) # Get the player choice from the input command. # The lower() is used to correct for people typing with capitals. # The strip() removes any trailing space that might mess us up. player_choice = input("Rock, paper, or scissors?").lower().strip() # Finally, time to compare! But we have to account for cheaters/mistakes. if player_choice not in choices: print("You must choose r, p, or s!") else: # Here we make the computer choose, then compare the two. computer_choice = random.choice(choices) if beats[player_choice] == computer_choice: print("You win!") else: print("You lose") """ This program works, but it's quite limited. Probably its biggest limitation is that it will only run once. It also doesn't keep score—why would it, since it only runs once? Our next version of the game will make it more user-friendly. """
Refactor out functions from rps1 # A simple rock, paper, scissors script submitted as a demo of easy game-making # In Python # Certain parts of this program are functional—that is, written in functions that # work together. Some parts aren't. As we improve the program, you'll find that # This functional way of doing things has some real advantages. import random # We need the random module for the computer to play # This dictionary relates a choice to what it defeats for easy comparison later. beats = { "rock":"scissors", "paper":"rock", "scissors":"paper" } # Now we make an easy-to-use list of choices from the beats choices = list(beats.keys()) # Get the player choice from the input command. # The lower() is used to correct for people typing with capitals. # The strip() removes any trailing space that might mess us up. player_choice = input("Rock, paper, or scissors?").lower().strip() # Finally, time to compare! But we have to account for cheaters/mistakes. if player_choice not in choices: print("You must choose r, p, or s!") else: # Here we make the computer choose, then compare the two. computer_choice = random.choice(choices) if beats[player_choice] == computer_choice: print("You win!") else: print("You lose") """ This program works, but it's quite limited. Probably its biggest limitation is that it will only run once. It also doesn't keep score—why would it, since it only runs once? Our next version of the game will make it more user-friendly. """
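One gap the script's closing comment alludes to: a tie currently falls through to "You lose". A minimal sketch of the comparison block with a tie branch added, using the same names as the script above:

if player_choice == computer_choice:
    print("It's a tie!")
elif beats[player_choice] == computer_choice:
    print("You win!")
else:
    print("You lose")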
012235fd93e77de19065a0e906554887e27580fd
kitsune/sumo/models.py
kitsune/sumo/models.py
from django.conf import settings from django.db import models class ModelBase(models.Model): """Base class for SUMO models. * Adds objects_range class method. * Adds update method. """ class Meta: abstract = True @classmethod def objects_range(cls, before=None, after=None): """ Returns a QuerySet of rows updated before, after or between the supplied datetimes. The `updated_column_name` property must be defined on a model using this, as that will be used as the column to filter on. """ column_name = getattr(cls, "updated_column_name", None) if not column_name: raise NotImplementedError queryset = cls._default_manager if before: queryset = queryset.filter(**{f"{column_name}__lt": before}) if after: queryset = queryset.filter(**{f"{column_name}__gt": after}) return queryset class LocaleField(models.CharField): """CharField with locale settings specific to SUMO defaults.""" def __init__( self, max_length=7, default=settings.LANGUAGE_CODE, choices=settings.LANGUAGE_CHOICES, *args, **kwargs, ): return super(LocaleField, self).__init__( max_length=max_length, default=default, choices=choices, *args, **kwargs )
from django.conf import settings from django.db import models class ModelBase(models.Model): """Base class for SUMO models. * Adds objects_range class method. * Adds update method. """ class Meta: abstract = True @classmethod def objects_range(cls, before=None, after=None): """ Returns a QuerySet of rows updated before, after or between the supplied datetimes. The `updated_column_name` property must be defined on a model using this, as that will be used as the column to filter on. """ column_name = getattr(cls, "updated_column_name", None) if not column_name: raise NotImplementedError queryset = cls._default_manager if before: queryset = queryset.filter(**{f"{column_name}__lt": before}) if after: queryset = queryset.filter(**{f"{column_name}__gt": after}) return queryset def update(self, **kw): """Shortcircuit to the update method.""" self.__class__.objects.filter(pk=self.pk).update(**kw) class LocaleField(models.CharField): """CharField with locale settings specific to SUMO defaults.""" def __init__( self, max_length=7, default=settings.LANGUAGE_CODE, choices=settings.LANGUAGE_CHOICES, *args, **kwargs, ): return super(LocaleField, self).__init__( max_length=max_length, default=default, choices=choices, *args, **kwargs )
Use Django's default update method
Use Django's default update method
Python
bsd-3-clause
mozilla/kitsune,mozilla/kitsune,mozilla/kitsune,mozilla/kitsune
<REPLACE_OLD> queryset class <REPLACE_NEW> queryset def update(self, **kw): """Shortcircuit to the update method.""" self.__class__.objects.filter(pk=self.pk).update(**kw) class <REPLACE_END> <|endoftext|> from django.conf import settings from django.db import models class ModelBase(models.Model): """Base class for SUMO models. * Adds objects_range class method. * Adds update method. """ class Meta: abstract = True @classmethod def objects_range(cls, before=None, after=None): """ Returns a QuerySet of rows updated before, after or between the supplied datetimes. The `updated_column_name` property must be defined on a model using this, as that will be used as the column to filter on. """ column_name = getattr(cls, "updated_column_name", None) if not column_name: raise NotImplementedError queryset = cls._default_manager if before: queryset = queryset.filter(**{f"{column_name}__lt": before}) if after: queryset = queryset.filter(**{f"{column_name}__gt": after}) return queryset def update(self, **kw): """Shortcircuit to the update method.""" self.__class__.objects.filter(pk=self.pk).update(**kw) class LocaleField(models.CharField): """CharField with locale settings specific to SUMO defaults.""" def __init__( self, max_length=7, default=settings.LANGUAGE_CODE, choices=settings.LANGUAGE_CHOICES, *args, **kwargs, ): return super(LocaleField, self).__init__( max_length=max_length, default=default, choices=choices, *args, **kwargs )
Use Django's default update method from django.conf import settings from django.db import models class ModelBase(models.Model): """Base class for SUMO models. * Adds objects_range class method. * Adds update method. """ class Meta: abstract = True @classmethod def objects_range(cls, before=None, after=None): """ Returns a QuerySet of rows updated before, after or between the supplied datetimes. The `updated_column_name` property must be defined on a model using this, as that will be used as the column to filter on. """ column_name = getattr(cls, "updated_column_name", None) if not column_name: raise NotImplementedError queryset = cls._default_manager if before: queryset = queryset.filter(**{f"{column_name}__lt": before}) if after: queryset = queryset.filter(**{f"{column_name}__gt": after}) return queryset class LocaleField(models.CharField): """CharField with locale settings specific to SUMO defaults.""" def __init__( self, max_length=7, default=settings.LANGUAGE_CODE, choices=settings.LANGUAGE_CHOICES, *args, **kwargs, ): return super(LocaleField, self).__init__( max_length=max_length, default=default, choices=choices, *args, **kwargs )
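The `update` helper added above issues a single SQL UPDATE through the default manager rather than going through `Model.save()`, so `save()` overrides and pre_save/post_save signals are bypassed and the in-memory instance is not refreshed. A hypothetical usage sketch; `Question` stands in for any ModelBase subclass:

q = Question.objects.get(pk=42)        # hypothetical model, for illustration only
q.update(title="New title")            # one UPDATE ... WHERE id = 42; no save() logic or signals run
q.refresh_from_db()                    # the Python object keeps its old values until reloaded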
f46770697d668e31518ada41d31fdb59a84f3cf6
kokki/cookbooks/aws/recipes/default.py
kokki/cookbooks/aws/recipes/default.py
from kokki import * Package("python-boto") # Mount volumes and format if necessary for vol in env.config.aws.volumes: env.cookbooks.aws.EBSVolume(vol['volume_id'], availability_zone = env.config.aws.availability_zone, device = vol['device'], action = "attach") if vol.get('fstype'): if vol['fstype'] == "xfs": Package("xfsprogs") Execute("mkfs.%(fstype)s -f %(device)s" % vol, not_if = """if [ "`file -s %(device)s`" = "%(device)s: data" ]; then exit 1; fi""" % vol) if vol.get('mount_point'): Mount(vol['mount_point'], device = vol['device'], fstype = vol.get('fstype'), options = vol.get('fsoptions', ["noatime"]), action = ["mount", "enable"])
import os from kokki import * # Package("python-boto") Execute("pip install git+http://github.com/boto/boto.git#egg=boto", not_if = 'python -c "import boto"') Execute("mv /usr/lib/pymodules/python2.6/boto /tmp/boto.orig", only_if = os.path.exists("/usr/lib/pymodules/python2.6/boto")) # Mount volumes and format if necessary for vol in env.config.aws.volumes: env.cookbooks.aws.EBSVolume(vol['volume_id'], availability_zone = env.config.aws.availability_zone, device = vol['device'], action = "attach") if vol.get('fstype'): if vol['fstype'] == "xfs": Package("xfsprogs") Execute("mkfs.%(fstype)s -f %(device)s" % vol, not_if = """if [ "`file -s %(device)s`" = "%(device)s: data" ]; then exit 1; fi""" % vol) if vol.get('mount_point'): Mount(vol['mount_point'], device = vol['device'], fstype = vol.get('fstype'), options = vol.get('fsoptions', ["noatime"]), action = ["mount", "enable"])
Install github version of boto in aws cookbook (for now)
Install github version of boto in aws cookbook (for now)
Python
bsd-3-clause
samuel/kokki
<REPLACE_OLD> from <REPLACE_NEW> import os from <REPLACE_END> <REPLACE_OLD> * Package("python-boto") # <REPLACE_NEW> * # Package("python-boto") Execute("pip install git+http://github.com/boto/boto.git#egg=boto", not_if = 'python -c "import boto"') Execute("mv /usr/lib/pymodules/python2.6/boto /tmp/boto.orig", only_if = os.path.exists("/usr/lib/pymodules/python2.6/boto")) # <REPLACE_END> <|endoftext|> import os from kokki import * # Package("python-boto") Execute("pip install git+http://github.com/boto/boto.git#egg=boto", not_if = 'python -c "import boto"') Execute("mv /usr/lib/pymodules/python2.6/boto /tmp/boto.orig", only_if = os.path.exists("/usr/lib/pymodules/python2.6/boto")) # Mount volumes and format if necessary for vol in env.config.aws.volumes: env.cookbooks.aws.EBSVolume(vol['volume_id'], availability_zone = env.config.aws.availability_zone, device = vol['device'], action = "attach") if vol.get('fstype'): if vol['fstype'] == "xfs": Package("xfsprogs") Execute("mkfs.%(fstype)s -f %(device)s" % vol, not_if = """if [ "`file -s %(device)s`" = "%(device)s: data" ]; then exit 1; fi""" % vol) if vol.get('mount_point'): Mount(vol['mount_point'], device = vol['device'], fstype = vol.get('fstype'), options = vol.get('fsoptions', ["noatime"]), action = ["mount", "enable"])
Install github version of boto in aws cookbook (for now) from kokki import * Package("python-boto") # Mount volumes and format if necessary for vol in env.config.aws.volumes: env.cookbooks.aws.EBSVolume(vol['volume_id'], availability_zone = env.config.aws.availability_zone, device = vol['device'], action = "attach") if vol.get('fstype'): if vol['fstype'] == "xfs": Package("xfsprogs") Execute("mkfs.%(fstype)s -f %(device)s" % vol, not_if = """if [ "`file -s %(device)s`" = "%(device)s: data" ]; then exit 1; fi""" % vol) if vol.get('mount_point'): Mount(vol['mount_point'], device = vol['device'], fstype = vol.get('fstype'), options = vol.get('fsoptions', ["noatime"]), action = ["mount", "enable"])
cedbfda6e9c040c6924eae2eff0e9b4e9f3f93f0
api/core/helpers.py
api/core/helpers.py
import pprint from django.core.mail import EmailMessage import log from rest_framework.reverse import reverse from sesame.utils import get_query_string def send_login_email(user, request, *, welcome): assert user.email, f"User has no email: {user}" base = reverse('redirector', args=["login"], request=request) token = get_query_string(user) url = base + token # TODO: Convert this to an email template if welcome: subject = "Welcome to Voter Engagement" else: subject = "Greetings from Voter Engagement" body = f"Click here to log in: {url}" email = EmailMessage( subject=subject, body=body, from_email="Citizen Labs <noreply@citizenlabs.org>", to=[user.email], ) log.debug(f"Sending email: {prettify(email.__dict__)}") count = email.send(fail_silently=False) return count def prettify(data: dict): return "{\n " + pprint.pformat(data, indent=2)[1:-1] + ",\n}"
import pprint from django.core.mail import EmailMessage import log from rest_framework.reverse import reverse from sesame.utils import get_query_string def send_login_email(user, request, *, welcome): assert user.email, f"User has no email: {user}" base = reverse('redirector', args=["login"], request=request) token = get_query_string(user) url = base + token message = EmailMessage( subject=None, from_email="Citizen Labs <noreply@citizenlabs.org>", to=[user.email], ) if welcome: message.template_id = 'voter-engagement-welcome' else: message.template_id = 'voter-engagement-login' message.merge_global_data = { 'FIRST_NAME': user.first_name, 'LAST_NAME': user.last_name, 'LOGIN_URL': url, # TODO: Set site URL dynamically 'SITE_URL': 'https://alpha-vote.citizenlabs.org/', # TODO: Implement unsubscribe functionality 'UNSUBSCRIBE_URL': 'https://citizenlabs.org/contact/', 'ABOUT_URL': 'https://citizenlabs.org/about/', } log.debug(f"Sending email: {prettify(message.__dict__)}") count = message.send(fail_silently=False) return count def prettify(data: dict): return "{\n " + pprint.pformat(data, indent=2)[1:-1] + ",\n}"
Use Mandrill templates to send emails
Use Mandrill templates to send emails
Python
mit
citizenlabsgr/voter-engagement,citizenlabsgr/voter-engagement,citizenlabsgr/voter-engagement,citizenlabsgr/voter-engagement,citizenlabsgr/voter-engagement
<REPLACE_OLD> # TODO: Convert this to an email template if welcome: subject = "Welcome to Voter Engagement" else: subject = "Greetings from Voter Engagement" body = f"Click here to log in: {url}" email <REPLACE_NEW> message <REPLACE_END> <REPLACE_OLD> subject=subject, body=body, <REPLACE_NEW> subject=None, <REPLACE_END> <REPLACE_OLD> ) <REPLACE_NEW> ) if welcome: message.template_id = 'voter-engagement-welcome' else: message.template_id = 'voter-engagement-login' message.merge_global_data = { 'FIRST_NAME': user.first_name, 'LAST_NAME': user.last_name, 'LOGIN_URL': url, # TODO: Set site URL dynamically 'SITE_URL': 'https://alpha-vote.citizenlabs.org/', # TODO: Implement unsubscribe functionality 'UNSUBSCRIBE_URL': 'https://citizenlabs.org/contact/', 'ABOUT_URL': 'https://citizenlabs.org/about/', } <REPLACE_END> <REPLACE_OLD> {prettify(email.__dict__)}") <REPLACE_NEW> {prettify(message.__dict__)}") <REPLACE_END> <REPLACE_OLD> email.send(fail_silently=False) <REPLACE_NEW> message.send(fail_silently=False) <REPLACE_END> <|endoftext|> import pprint from django.core.mail import EmailMessage import log from rest_framework.reverse import reverse from sesame.utils import get_query_string def send_login_email(user, request, *, welcome): assert user.email, f"User has no email: {user}" base = reverse('redirector', args=["login"], request=request) token = get_query_string(user) url = base + token message = EmailMessage( subject=None, from_email="Citizen Labs <noreply@citizenlabs.org>", to=[user.email], ) if welcome: message.template_id = 'voter-engagement-welcome' else: message.template_id = 'voter-engagement-login' message.merge_global_data = { 'FIRST_NAME': user.first_name, 'LAST_NAME': user.last_name, 'LOGIN_URL': url, # TODO: Set site URL dynamically 'SITE_URL': 'https://alpha-vote.citizenlabs.org/', # TODO: Implement unsubscribe functionality 'UNSUBSCRIBE_URL': 'https://citizenlabs.org/contact/', 'ABOUT_URL': 'https://citizenlabs.org/about/', } log.debug(f"Sending email: {prettify(message.__dict__)}") count = message.send(fail_silently=False) return count def prettify(data: dict): return "{\n " + pprint.pformat(data, indent=2)[1:-1] + ",\n}"
Use Mandrill templates to send emails import pprint from django.core.mail import EmailMessage import log from rest_framework.reverse import reverse from sesame.utils import get_query_string def send_login_email(user, request, *, welcome): assert user.email, f"User has no email: {user}" base = reverse('redirector', args=["login"], request=request) token = get_query_string(user) url = base + token # TODO: Convert this to an email template if welcome: subject = "Welcome to Voter Engagement" else: subject = "Greetings from Voter Engagement" body = f"Click here to log in: {url}" email = EmailMessage( subject=subject, body=body, from_email="Citizen Labs <noreply@citizenlabs.org>", to=[user.email], ) log.debug(f"Sending email: {prettify(email.__dict__)}") count = email.send(fail_silently=False) return count def prettify(data: dict): return "{\n " + pprint.pformat(data, indent=2)[1:-1] + ",\n}"
65838da6a7353ae084a13565ea2dd55368a27176
history/migrations/0007_auto_20141026_2348.py
history/migrations/0007_auto_20141026_2348.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('history', '0006_committeemember_member'), ] operations = [ migrations.AlterField( model_name='meetingminutes', name='meeting_type', field=models.CharField(default=b'MM', max_length=2, choices=[(b'NI', b'New Initiatives'), (b'MM', b'Main Meetings'), (b'OF', b'Officer Meetings'), (b'AD', b'Advisory Board Meetings'), (b'CM', b'Committee Meeting Minutes')]), ), ]
Update meeting minutes model (committees)
Update meeting minutes model (committees)
Python
apache-2.0
tbpmig/mig-website,tbpmig/mig-website,tbpmig/mig-website
<INSERT> # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): <INSERT_END> <INSERT> dependencies = [ ('history', '0006_committeemember_member'), ] operations = [ migrations.AlterField( model_name='meetingminutes', name='meeting_type', field=models.CharField(default=b'MM', max_length=2, choices=[(b'NI', b'New Initiatives'), (b'MM', b'Main Meetings'), (b'OF', b'Officer Meetings'), (b'AD', b'Advisory Board Meetings'), (b'CM', b'Committee Meeting Minutes')]), ), ] <INSERT_END> <|endoftext|> # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('history', '0006_committeemember_member'), ] operations = [ migrations.AlterField( model_name='meetingminutes', name='meeting_type', field=models.CharField(default=b'MM', max_length=2, choices=[(b'NI', b'New Initiatives'), (b'MM', b'Main Meetings'), (b'OF', b'Officer Meetings'), (b'AD', b'Advisory Board Meetings'), (b'CM', b'Committee Meeting Minutes')]), ), ]
Update meeting minutes model (committees)
f3c4bac262c6d09730b3f0c4a24639fde8b4d923
gunicorn-app.py
gunicorn-app.py
from __future__ import unicode_literals import multiprocessing import gunicorn.app.base from gunicorn.six import iteritems def number_of_workers(): return (multiprocessing.cpu_count() * 2) + 1 def handler_app(environ, start_response): response_body = b'Works fine' status = '200 OK' response_headers = [ ('Content-Type', 'text/plain'), ] start_response(status, response_headers) return [response_body] class StandaloneApplication(gunicorn.app.base.BaseApplication): def __init__(self, app, options=None): self.options = options or {} self.application = app super(StandaloneApplication, self).__init__() def load_config(self): config = dict([(key, value) for key, value in iteritems(self.options) if key in self.cfg.settings and value is not None]) for key, value in iteritems(config): self.cfg.set(key.lower(), value) def load(self): return self.application if __name__ == '__main__': options = { 'bind': '%s:%s' % ('127.0.0.1', '8080'), 'workers': number_of_workers(), } StandaloneApplication(handler_app, options).run()
Add WSGI-compatible example gunicorn application
Add WSGI-compatible example gunicorn application
Python
mit
voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts,voidabhi/python-scripts
<REPLACE_OLD> <REPLACE_NEW> from __future__ import unicode_literals import multiprocessing import gunicorn.app.base from gunicorn.six import iteritems def number_of_workers(): return (multiprocessing.cpu_count() * 2) + 1 def handler_app(environ, start_response): response_body = b'Works fine' status = '200 OK' response_headers = [ ('Content-Type', 'text/plain'), ] start_response(status, response_headers) return [response_body] class StandaloneApplication(gunicorn.app.base.BaseApplication): def __init__(self, app, options=None): self.options = options or {} self.application = app super(StandaloneApplication, self).__init__() def load_config(self): config = dict([(key, value) for key, value in iteritems(self.options) if key in self.cfg.settings and value is not None]) for key, value in iteritems(config): self.cfg.set(key.lower(), value) def load(self): return self.application if __name__ == '__main__': options = { 'bind': '%s:%s' % ('127.0.0.1', '8080'), 'workers': number_of_workers(), } StandaloneApplication(handler_app, options).run() <REPLACE_END> <|endoftext|> from __future__ import unicode_literals import multiprocessing import gunicorn.app.base from gunicorn.six import iteritems def number_of_workers(): return (multiprocessing.cpu_count() * 2) + 1 def handler_app(environ, start_response): response_body = b'Works fine' status = '200 OK' response_headers = [ ('Content-Type', 'text/plain'), ] start_response(status, response_headers) return [response_body] class StandaloneApplication(gunicorn.app.base.BaseApplication): def __init__(self, app, options=None): self.options = options or {} self.application = app super(StandaloneApplication, self).__init__() def load_config(self): config = dict([(key, value) for key, value in iteritems(self.options) if key in self.cfg.settings and value is not None]) for key, value in iteritems(config): self.cfg.set(key.lower(), value) def load(self): return self.application if __name__ == '__main__': options = { 'bind': '%s:%s' % ('127.0.0.1', '8080'), 'workers': number_of_workers(), } StandaloneApplication(handler_app, options).run()
Add WSGI-compatible example gunicorn application
222628c6747bdc3574bcb7cf6257c785ffa6451d
inventory_control/database/sql.py
inventory_control/database/sql.py
""" So this is where all the SQL commands live """ CREATE_SQL = """ CREATE TABLE component_type ( id INT PRIMARY KEY AUTO_INCREMENT, type VARCHAR(255) UNIQUE ); CREATE TABLE components ( id INT PRIMARY KEY AUTO_INCREMENT, sku TEXT, type INT, status INT, FOREIGN KEY (type) REFERENCES component_type(id) ); CREATE TABLE projects ( id INT PRIMARY KEY AUTO_INCREMENT, motherboard INT, power_supply INT, cpu INT, hard_drive INT, proj_case INT, memory INT, FOREIGN KEY (motherboard) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (cpu) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (power_supply) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (hard_drive) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (proj_case) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (memory) REFERENCES components(id) ON DELETE CASCADE ); """ ADD_COMPONENT_TYPE = """INSERT IGNORE INTO component_type (type) VALUES ('{text}') """ GET_COMPONENT_TYPE="""SELECT * FROM component_type WHERE type='{text}'""" DELETE_COMPONENT_TYPE = """DELETE FROM component_type WHERE type='{text}' """ SELECT_ALL_COMPONENTS = """ SELECT * FROM components INNER JOIN component_type ON components.type = component_type.id; """ DROP_SQL = """ DROP TABLE projects; DROP TABLE components; DROP TABLE component_type; """
""" So this is where all the SQL commands live """ CREATE_SQL = """ CREATE TABLE component_type ( id INT PRIMARY KEY AUTO_INCREMENT, type VARCHAR(255) UNIQUE ); CREATE TABLE components ( id INT PRIMARY KEY AUTO_INCREMENT, serial_number VARCHAR(255), sku TEXT, type INT, status INT, FOREIGN KEY (type) REFERENCES component_type(id) ); CREATE TABLE projects ( id INT PRIMARY KEY AUTO_INCREMENT, product_number INT, motherboard INT, power_supply INT, cpu INT, hard_drive INT, proj_case INT, memory INT, FOREIGN KEY (motherboard) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (cpu) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (power_supply) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (hard_drive) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (proj_case) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (memory) REFERENCES components(id) ON DELETE CASCADE ); """ ADD_COMPONENT_TYPE = """INSERT IGNORE INTO component_type (type) VALUES ('{text}') """ GET_COMPONENT_TYPE="""SELECT * FROM component_type WHERE type='{text}'""" DELETE_COMPONENT_TYPE = """DELETE FROM component_type WHERE type='{text}' """ SELECT_ALL_COMPONENTS = """ SELECT * FROM components INNER JOIN component_type ON components.type = component_type.id; """ DROP_SQL = """ DROP TABLE projects; DROP TABLE components; DROP TABLE component_type; """
Add product_number and serial_number identifiers
Add product_number and serial_number identifiers
Python
mit
worldcomputerxchange/inventory-control,codeforsanjose/inventory-control
<INSERT> serial_number VARCHAR(255), <INSERT_END> <INSERT> product_number INT, <INSERT_END> <|endoftext|> """ So this is where all the SQL commands live """ CREATE_SQL = """ CREATE TABLE component_type ( id INT PRIMARY KEY AUTO_INCREMENT, type VARCHAR(255) UNIQUE ); CREATE TABLE components ( id INT PRIMARY KEY AUTO_INCREMENT, serial_number VARCHAR(255), sku TEXT, type INT, status INT, FOREIGN KEY (type) REFERENCES component_type(id) ); CREATE TABLE projects ( id INT PRIMARY KEY AUTO_INCREMENT, product_number INT, motherboard INT, power_supply INT, cpu INT, hard_drive INT, proj_case INT, memory INT, FOREIGN KEY (motherboard) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (cpu) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (power_supply) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (hard_drive) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (proj_case) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (memory) REFERENCES components(id) ON DELETE CASCADE ); """ ADD_COMPONENT_TYPE = """INSERT IGNORE INTO component_type (type) VALUES ('{text}') """ GET_COMPONENT_TYPE="""SELECT * FROM component_type WHERE type='{text}'""" DELETE_COMPONENT_TYPE = """DELETE FROM component_type WHERE type='{text}' """ SELECT_ALL_COMPONENTS = """ SELECT * FROM components INNER JOIN component_type ON components.type = component_type.id; """ DROP_SQL = """ DROP TABLE projects; DROP TABLE components; DROP TABLE component_type; """
Add product_number and serial_number identifiers """ So this is where all the SQL commands live """ CREATE_SQL = """ CREATE TABLE component_type ( id INT PRIMARY KEY AUTO_INCREMENT, type VARCHAR(255) UNIQUE ); CREATE TABLE components ( id INT PRIMARY KEY AUTO_INCREMENT, sku TEXT, type INT, status INT, FOREIGN KEY (type) REFERENCES component_type(id) ); CREATE TABLE projects ( id INT PRIMARY KEY AUTO_INCREMENT, motherboard INT, power_supply INT, cpu INT, hard_drive INT, proj_case INT, memory INT, FOREIGN KEY (motherboard) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (cpu) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (power_supply) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (hard_drive) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (proj_case) REFERENCES components(id) ON DELETE CASCADE, FOREIGN KEY (memory) REFERENCES components(id) ON DELETE CASCADE ); """ ADD_COMPONENT_TYPE = """INSERT IGNORE INTO component_type (type) VALUES ('{text}') """ GET_COMPONENT_TYPE="""SELECT * FROM component_type WHERE type='{text}'""" DELETE_COMPONENT_TYPE = """DELETE FROM component_type WHERE type='{text}' """ SELECT_ALL_COMPONENTS = """ SELECT * FROM components INNER JOIN component_type ON components.type = component_type.id; """ DROP_SQL = """ DROP TABLE projects; DROP TABLE components; DROP TABLE component_type; """
7db27a3629e442c99abd24503f08d982b6a30e33
packages/Python/lldbsuite/test/lang/objc/modules-cache/TestClangModulesCache.py
packages/Python/lldbsuite/test/lang/objc/modules-cache/TestClangModulesCache.py
"""Test that the clang modules cache directory can be controlled.""" from __future__ import print_function import unittest2 import os import time import platform import shutil import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class ObjCModulesTestCase(TestBase): NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) def setUp(self): TestBase.setUp(self) def test_expr(self): self.build() self.main_source_file = lldb.SBFileSpec("main.m") self.runCmd("settings set target.auto-import-clang-modules true") mod_cache = self.getBuildArtifact("my-clang-modules-cache") if os.path.isdir(mod_cache): shutil.rmtree(mod_cache) self.assertFalse(os.path.isdir(mod_cache), "module cache should not exist") self.runCmd('settings set symbols.clang-modules-cache-path "%s"' % mod_cache) self.runCmd('settings set target.clang-module-search-paths "%s"' % self.getSourceDir()) (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint( self, "Set breakpoint here", self.main_source_file) self.runCmd("expr @import Darwin") self.assertTrue(os.path.isdir(mod_cache), "module cache exists")
"""Test that the clang modules cache directory can be controlled.""" from __future__ import print_function import unittest2 import os import time import platform import shutil import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class ObjCModulesTestCase(TestBase): NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) def setUp(self): TestBase.setUp(self) @skipUnlessDarwin def test_expr(self): self.build() self.main_source_file = lldb.SBFileSpec("main.m") self.runCmd("settings set target.auto-import-clang-modules true") mod_cache = self.getBuildArtifact("my-clang-modules-cache") if os.path.isdir(mod_cache): shutil.rmtree(mod_cache) self.assertFalse(os.path.isdir(mod_cache), "module cache should not exist") self.runCmd('settings set symbols.clang-modules-cache-path "%s"' % mod_cache) self.runCmd('settings set target.clang-module-search-paths "%s"' % self.getSourceDir()) (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint( self, "Set breakpoint here", self.main_source_file) self.runCmd("expr @import Foo") self.assertTrue(os.path.isdir(mod_cache), "module cache exists")
Mark ObjC testcase as skipUnlessDarwin and fix a typo in test function.
Mark ObjC testcase as skipUnlessDarwin and fix a typo in test function. git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@326640 91177308-0d34-0410-b5e6-96231b3b80d8 (cherry picked from commit cb9b1a2163f960e34721f74bad30622fda71e43b)
Python
apache-2.0
apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb,apple/swift-lldb
<INSERT> @skipUnlessDarwin <INSERT_END> <REPLACE_OLD> Darwin") <REPLACE_NEW> Foo") <REPLACE_END> <|endoftext|> """Test that the clang modules cache directory can be controlled.""" from __future__ import print_function import unittest2 import os import time import platform import shutil import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class ObjCModulesTestCase(TestBase): NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) def setUp(self): TestBase.setUp(self) @skipUnlessDarwin def test_expr(self): self.build() self.main_source_file = lldb.SBFileSpec("main.m") self.runCmd("settings set target.auto-import-clang-modules true") mod_cache = self.getBuildArtifact("my-clang-modules-cache") if os.path.isdir(mod_cache): shutil.rmtree(mod_cache) self.assertFalse(os.path.isdir(mod_cache), "module cache should not exist") self.runCmd('settings set symbols.clang-modules-cache-path "%s"' % mod_cache) self.runCmd('settings set target.clang-module-search-paths "%s"' % self.getSourceDir()) (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint( self, "Set breakpoint here", self.main_source_file) self.runCmd("expr @import Foo") self.assertTrue(os.path.isdir(mod_cache), "module cache exists")
Mark ObjC testcase as skipUnlessDarwin and fix a typo in test function. git-svn-id: 4c4cc70b1ef44ba2b7963015e681894188cea27e@326640 91177308-0d34-0410-b5e6-96231b3b80d8 (cherry picked from commit cb9b1a2163f960e34721f74bad30622fda71e43b) """Test that the clang modules cache directory can be controlled.""" from __future__ import print_function import unittest2 import os import time import platform import shutil import lldb from lldbsuite.test.decorators import * from lldbsuite.test.lldbtest import * from lldbsuite.test import lldbutil class ObjCModulesTestCase(TestBase): NO_DEBUG_INFO_TESTCASE = True mydir = TestBase.compute_mydir(__file__) def setUp(self): TestBase.setUp(self) def test_expr(self): self.build() self.main_source_file = lldb.SBFileSpec("main.m") self.runCmd("settings set target.auto-import-clang-modules true") mod_cache = self.getBuildArtifact("my-clang-modules-cache") if os.path.isdir(mod_cache): shutil.rmtree(mod_cache) self.assertFalse(os.path.isdir(mod_cache), "module cache should not exist") self.runCmd('settings set symbols.clang-modules-cache-path "%s"' % mod_cache) self.runCmd('settings set target.clang-module-search-paths "%s"' % self.getSourceDir()) (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint( self, "Set breakpoint here", self.main_source_file) self.runCmd("expr @import Darwin") self.assertTrue(os.path.isdir(mod_cache), "module cache exists")
46afcd0e5e958e22647ef9c708918489027277e2
modeltranslation/tests/settings.py
modeltranslation/tests/settings.py
# -*- coding: utf-8 -*- """ Settings overridden for test time """ import os from django.conf import settings DIRNAME = os.path.dirname(__file__) INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + ( 'modeltranslation.tests', ) # IMO this is unimportant #if django.VERSION[0] >= 1 and django.VERSION[1] >= 3: #INSTALLED_APPS += ('django.contrib.staticfiles',) #STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(DIRNAME, 'media/') LANGUAGES = (('de', 'Deutsch'), ('en', 'English')) LANGUAGE_CODE = 'de' MODELTRANSLATION_DEFAULT_LANGUAGE = 'de' USE_I18N = True MODELTRANSLATION_AUTO_POPULATE = False MODELTRANSLATION_FALLBACK_LANGUAGES = ()
# -*- coding: utf-8 -*- """ Settings overridden for test time """ import os from django.conf import settings DIRNAME = os.path.dirname(__file__) INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + ( 'modeltranslation.tests', ) # IMO this is unimportant #if django.VERSION[0] >= 1 and django.VERSION[1] >= 3: #INSTALLED_APPS += ('django.contrib.staticfiles',) #STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(DIRNAME, 'media/') LANGUAGES = (('de', 'Deutsch'), ('en', 'English')) LANGUAGE_CODE = 'de' MODELTRANSLATION_DEFAULT_LANGUAGE = 'de' USE_I18N = True USE_TZ = False MODELTRANSLATION_AUTO_POPULATE = False MODELTRANSLATION_FALLBACK_LANGUAGES = ()
Disable timezone support for tests, as the date / time fields' tests use naive datetime objects and fail if it's enabled.
Disable timezone support for tests, as the date / time fields' tests use naive datetime objects and fail if it's enabled.
Python
bsd-3-clause
extertioner/django-modeltranslation,marctc/django-modeltranslation,yoza/django-modeltranslation,nanuxbe/django-modeltranslation,akheron/django-modeltranslation,vstoykov/django-modeltranslation,SideStudios/django-modeltranslation,yoza/django-modeltranslation,marctc/django-modeltranslation,deschler/django-modeltranslation,akheron/django-modeltranslation,nanuxbe/django-modeltranslation,extertioner/django-modeltranslation,deschler/django-modeltranslation,SideStudios/django-modeltranslation,vstoykov/django-modeltranslation
<REPLACE_OLD> True MODELTRANSLATION_AUTO_POPULATE <REPLACE_NEW> True USE_TZ = False MODELTRANSLATION_AUTO_POPULATE <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- """ Settings overridden for test time """ import os from django.conf import settings DIRNAME = os.path.dirname(__file__) INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + ( 'modeltranslation.tests', ) # IMO this is unimportant #if django.VERSION[0] >= 1 and django.VERSION[1] >= 3: #INSTALLED_APPS += ('django.contrib.staticfiles',) #STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(DIRNAME, 'media/') LANGUAGES = (('de', 'Deutsch'), ('en', 'English')) LANGUAGE_CODE = 'de' MODELTRANSLATION_DEFAULT_LANGUAGE = 'de' USE_I18N = True USE_TZ = False MODELTRANSLATION_AUTO_POPULATE = False MODELTRANSLATION_FALLBACK_LANGUAGES = ()
Disable timezone support for tests, as the date / time fields' tests use naive datetime objects and fail if it's enabled. # -*- coding: utf-8 -*- """ Settings overridden for test time """ import os from django.conf import settings DIRNAME = os.path.dirname(__file__) INSTALLED_APPS = tuple(settings.INSTALLED_APPS) + ( 'modeltranslation.tests', ) # IMO this is unimportant #if django.VERSION[0] >= 1 and django.VERSION[1] >= 3: #INSTALLED_APPS += ('django.contrib.staticfiles',) #STATIC_URL = '/static/' MEDIA_URL = '/media/' MEDIA_ROOT = os.path.join(DIRNAME, 'media/') LANGUAGES = (('de', 'Deutsch'), ('en', 'English')) LANGUAGE_CODE = 'de' MODELTRANSLATION_DEFAULT_LANGUAGE = 'de' USE_I18N = True MODELTRANSLATION_AUTO_POPULATE = False MODELTRANSLATION_FALLBACK_LANGUAGES = ()
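The failure mode that USE_TZ = False sidesteps can be reproduced without Django at all: comparing a naive datetime with an aware one raises TypeError, which is what happens when naive test fixtures meet timezone-aware database values. For illustration:

from datetime import datetime, timezone

naive = datetime(2012, 1, 1)
aware = datetime(2012, 1, 1, tzinfo=timezone.utc)
try:
    naive < aware
except TypeError as exc:
    print(exc)  # can't compare offset-naive and offset-aware datetimes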
6a940fbd0cc8c4e4a9f17423c593452d010b6883
app/lib/query/__init__.py
app/lib/query/__init__.py
# -*- coding: utf-8 -*- """ Initialisation file for query directory. """
# -*- coding: utf-8 -*- """ Initialisation file for query directory, relating to local database queries. """
Update query init file docstring.
Update query init file docstring.
Python
mit
MichaelCurrin/twitterverse,MichaelCurrin/twitterverse
<REPLACE_OLD> directory. """ <REPLACE_NEW> directory, relating to local database queries. """ <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- """ Initialisation file for query directory, relating to local database queries. """
Update query init file docstring. # -*- coding: utf-8 -*- """ Initialisation file for query directory. """
1c2b6c0daea1d04985ef6ddff35527ba207ec191
qual/tests/test_calendar.py
qual/tests/test_calendar.py
import unittest from datetime import date import qual class TestProlepticGregorianCalendar(unittest.TestCase): def setUp(self): self.calendar = qual.ProlepticGregorianCalendar() def check_valid_date(self, year, month, day): d = self.calendar.date(year, month, day) self.assertIsNotNone(d) def test_leap_year_from_before_1582(self): """Pope Gregory introduced the calendar in 1582""" self.check_valid_date(1200, 2, 29)
import unittest from datetime import date import qual class TestProlepticGregorianCalendar(unittest.TestCase): def setUp(self): self.calendar = qual.ProlepticGregorianCalendar() def check_valid_date(self, year, month, day): d = self.calendar.date(year, month, day) self.assertIsNotNone(d) def check_invalid_date(self, year, month, day): self.assertRaises(Exception, lambda : self.calendar(year, month, day)) def test_leap_year_from_before_1582(self): """Pope Gregory introduced the calendar in 1582""" self.check_valid_date(1200, 2, 29) def test_Julian_leap_day_is_not_a_valid_date(self): """This day /was/ a leap day contemporaneously, but is not a valid date of the Gregorian calendar.""" self.check_invalid_date(1300, 2, 29)
Check that a certain date is invalid.
Check that a certain date is invalid. This distinguishes correctly between the proleptic Gregorian calendar, and the historical or astronomical calendars, where this date would be valid.
Python
apache-2.0
jwg4/calexicon,jwg4/qual
<INSERT> check_invalid_date(self, year, month, day): self.assertRaises(Exception, lambda : self.calendar(year, month, day)) def <INSERT_END> <REPLACE_OLD> 29) <REPLACE_NEW> 29) def test_Julian_leap_day_is_not_a_valid_date(self): """This day /was/ a leap day contemporaneously, but is not a valid date of the Gregorian calendar.""" self.check_invalid_date(1300, 2, 29) <REPLACE_END> <|endoftext|> import unittest from datetime import date import qual class TestProlepticGregorianCalendar(unittest.TestCase): def setUp(self): self.calendar = qual.ProlepticGregorianCalendar() def check_valid_date(self, year, month, day): d = self.calendar.date(year, month, day) self.assertIsNotNone(d) def check_invalid_date(self, year, month, day): self.assertRaises(Exception, lambda : self.calendar(year, month, day)) def test_leap_year_from_before_1582(self): """Pope Gregory introduced the calendar in 1582""" self.check_valid_date(1200, 2, 29) def test_Julian_leap_day_is_not_a_valid_date(self): """This day /was/ a leap day contemporaneously, but is not a valid date of the Gregorian calendar.""" self.check_invalid_date(1300, 2, 29)
Check that a certain date is invalid. This distinguishes correctly between the proleptic Gregorian calendar, and the historical or astronomical calendars, where this date would be valid. import unittest from datetime import date import qual class TestProlepticGregorianCalendar(unittest.TestCase): def setUp(self): self.calendar = qual.ProlepticGregorianCalendar() def check_valid_date(self, year, month, day): d = self.calendar.date(year, month, day) self.assertIsNotNone(d) def test_leap_year_from_before_1582(self): """Pope Gregory introduced the calendar in 1582""" self.check_valid_date(1200, 2, 29)
5cd459485fee2611eb96229b0a600b3a21f4fc13
stagecraft/apps/datasets/admin/backdrop_user.py
stagecraft/apps/datasets/admin/backdrop_user.py
from __future__ import unicode_literals from django.contrib import admin from django.db import models import reversion from stagecraft.apps.datasets.models.backdrop_user import BackdropUser from stagecraft.apps.datasets.models.data_set import DataSet class DataSetInline(admin.StackedInline): model = DataSet fields = ('name',) extra = 0 class BackdropUserAdmin(reversion.VersionAdmin): search_fields = ['email'] list_display = ('email', 'numer_of_datasets_user_has_access_to',) list_per_page = 30 filter_horizontal = ('data_sets',) def queryset(self, request): return BackdropUser.objects.annotate( dataset_count=models.Count('data_sets') ) def numer_of_datasets_user_has_access_to(self, obj): return obj.dataset_count numer_of_datasets_user_has_access_to.admin_order_field = 'dataset_count' admin.site.register(BackdropUser, BackdropUserAdmin)
from __future__ import unicode_literals from django.contrib import admin from django.db import models import reversion from stagecraft.apps.datasets.models.backdrop_user import BackdropUser from stagecraft.apps.datasets.models.data_set import DataSet class DataSetInline(admin.StackedInline): model = DataSet fields = ('name',) extra = 0 class BackdropUserAdmin(reversion.VersionAdmin): search_fields = ['email'] list_display = ('email', 'number_of_datasets_user_has_access_to',) list_per_page = 30 filter_horizontal = ('data_sets',) def queryset(self, request): return BackdropUser.objects.annotate( dataset_count=models.Count('data_sets') ) def number_of_datasets_user_has_access_to(self, obj): return obj.dataset_count number_of_datasets_user_has_access_to.admin_order_field = 'dataset_count' admin.site.register(BackdropUser, BackdropUserAdmin)
Fix typo in BackdropUser admin model
Fix typo in BackdropUser admin model
Python
mit
alphagov/stagecraft,alphagov/stagecraft,alphagov/stagecraft,alphagov/stagecraft
<REPLACE_OLD> 'numer_of_datasets_user_has_access_to',) <REPLACE_NEW> 'number_of_datasets_user_has_access_to',) <REPLACE_END> <REPLACE_OLD> numer_of_datasets_user_has_access_to(self, <REPLACE_NEW> number_of_datasets_user_has_access_to(self, <REPLACE_END> <REPLACE_OLD> numer_of_datasets_user_has_access_to.admin_order_field <REPLACE_NEW> number_of_datasets_user_has_access_to.admin_order_field <REPLACE_END> <|endoftext|> from __future__ import unicode_literals from django.contrib import admin from django.db import models import reversion from stagecraft.apps.datasets.models.backdrop_user import BackdropUser from stagecraft.apps.datasets.models.data_set import DataSet class DataSetInline(admin.StackedInline): model = DataSet fields = ('name',) extra = 0 class BackdropUserAdmin(reversion.VersionAdmin): search_fields = ['email'] list_display = ('email', 'number_of_datasets_user_has_access_to',) list_per_page = 30 filter_horizontal = ('data_sets',) def queryset(self, request): return BackdropUser.objects.annotate( dataset_count=models.Count('data_sets') ) def number_of_datasets_user_has_access_to(self, obj): return obj.dataset_count number_of_datasets_user_has_access_to.admin_order_field = 'dataset_count' admin.site.register(BackdropUser, BackdropUserAdmin)
Fix typo in BackdropUser admin model from __future__ import unicode_literals from django.contrib import admin from django.db import models import reversion from stagecraft.apps.datasets.models.backdrop_user import BackdropUser from stagecraft.apps.datasets.models.data_set import DataSet class DataSetInline(admin.StackedInline): model = DataSet fields = ('name',) extra = 0 class BackdropUserAdmin(reversion.VersionAdmin): search_fields = ['email'] list_display = ('email', 'numer_of_datasets_user_has_access_to',) list_per_page = 30 filter_horizontal = ('data_sets',) def queryset(self, request): return BackdropUser.objects.annotate( dataset_count=models.Count('data_sets') ) def numer_of_datasets_user_has_access_to(self, obj): return obj.dataset_count numer_of_datasets_user_has_access_to.admin_order_field = 'dataset_count' admin.site.register(BackdropUser, BackdropUserAdmin)
97529500f335ee1f75a6229c1c313bc7ff2d68ef
satchmo/apps/satchmo_store/shop/management/commands/satchmo_copy_static.py
satchmo/apps/satchmo_store/shop/management/commands/satchmo_copy_static.py
from django.core.management.base import NoArgsCommand import os import shutil class Command(NoArgsCommand): help = "Copy the satchmo static directory and files to the local project." def handle_noargs(self, **options): import satchmo_store static_src = os.path.join(satchmo_store.__path__[0],'static') static_dest = os.path.join(os.getcwd(), 'static') if os.path.exists(static_dest): print "Static directory exists. You must manually copy the files you need." else: shutil.copytree(static_src, static_dest) for root, dirs, files in os.walk(static_dest): if '.svn' in dirs: shutil.rmtree(os.path.join(root,'.svn'), True) print "Copied %s to %s" % (static_src, static_dest)
from django.core.management.base import NoArgsCommand import os import shutil class Command(NoArgsCommand): help = "Copy the satchmo static directory and files to the local project." def handle_noargs(self, **options): import satchmo_store static_src = os.path.join(satchmo_store.__path__[0],'../../static') static_dest = os.path.join(os.getcwd(), 'static') if os.path.exists(static_dest): print "Static directory exists. You must manually copy the files you need." else: shutil.copytree(static_src, static_dest) for root, dirs, files in os.walk(static_dest): if '.svn' in dirs: shutil.rmtree(os.path.join(root,'.svn'), True) print "Copied %s to %s" % (static_src, static_dest)
Change the static copy command so it works with new layout.
Change the static copy command so it works with new layout. --HG-- extra : convert_revision : svn%3Aa38d40e9-c014-0410-b785-c606c0c8e7de/satchmo/trunk%401887
Python
bsd-3-clause
dokterbob/satchmo,Ryati/satchmo,twidi/satchmo,ringemup/satchmo,ringemup/satchmo,Ryati/satchmo,twidi/satchmo,dokterbob/satchmo
<REPLACE_OLD> os.path.join(satchmo_store.__path__[0],'static') <REPLACE_NEW> os.path.join(satchmo_store.__path__[0],'../../static') <REPLACE_END> <|endoftext|> from django.core.management.base import NoArgsCommand import os import shutil class Command(NoArgsCommand): help = "Copy the satchmo static directory and files to the local project." def handle_noargs(self, **options): import satchmo_store static_src = os.path.join(satchmo_store.__path__[0],'../../static') static_dest = os.path.join(os.getcwd(), 'static') if os.path.exists(static_dest): print "Static directory exists. You must manually copy the files you need." else: shutil.copytree(static_src, static_dest) for root, dirs, files in os.walk(static_dest): if '.svn' in dirs: shutil.rmtree(os.path.join(root,'.svn'), True) print "Copied %s to %s" % (static_src, static_dest)
Change the static copy command so it works with new layout. --HG-- extra : convert_revision : svn%3Aa38d40e9-c014-0410-b785-c606c0c8e7de/satchmo/trunk%401887 from django.core.management.base import NoArgsCommand import os import shutil class Command(NoArgsCommand): help = "Copy the satchmo static directory and files to the local project." def handle_noargs(self, **options): import satchmo_store static_src = os.path.join(satchmo_store.__path__[0],'static') static_dest = os.path.join(os.getcwd(), 'static') if os.path.exists(static_dest): print "Static directory exists. You must manually copy the files you need." else: shutil.copytree(static_src, static_dest) for root, dirs, files in os.walk(static_dest): if '.svn' in dirs: shutil.rmtree(os.path.join(root,'.svn'), True) print "Copied %s to %s" % (static_src, static_dest)
7f7e606cc15e24190880d7388d07623be783a384
src/address_extractor/__init__.py
src/address_extractor/__init__.py
from .__main__ import main __version__ = '1.0.0' __title__ = 'address_extractor' __description__ = '' __url__ = '' __author__ = 'Scott Colby' __email__ = '' __license__ = 'MIT License' __copyright__ = 'Copyright (c) 2015 Scott Colby' __all__ = [ 'address_extractor' ]
from .__main__ import main from .__main__ import parsed_address_to_human __version__ = '1.0.0' __title__ = 'address_extractor' __description__ = '' __url__ = '' __author__ = 'Scott Colby' __email__ = '' __license__ = 'MIT License' __copyright__ = 'Copyright (c) 2015 Scott Colby' __all__ = [ 'main', 'parsed_address_to_human' ]
Change importing structure in init
Change importing structure in init
Python
mit
scolby33/address_extractor
<REPLACE_OLD> main __version__ <REPLACE_NEW> main from .__main__ import parsed_address_to_human __version__ <REPLACE_END> <REPLACE_OLD> 'address_extractor' ] <REPLACE_NEW> 'main', 'parsed_address_to_human' ] <REPLACE_END> <|endoftext|> from .__main__ import main from .__main__ import parsed_address_to_human __version__ = '1.0.0' __title__ = 'address_extractor' __description__ = '' __url__ = '' __author__ = 'Scott Colby' __email__ = '' __license__ = 'MIT License' __copyright__ = 'Copyright (c) 2015 Scott Colby' __all__ = [ 'main', 'parsed_address_to_human' ]
Change importing structure in init from .__main__ import main __version__ = '1.0.0' __title__ = 'address_extractor' __description__ = '' __url__ = '' __author__ = 'Scott Colby' __email__ = '' __license__ = 'MIT License' __copyright__ = 'Copyright (c) 2015 Scott Colby' __all__ = [ 'address_extractor' ]
e4f7deee8c4154781c2e945bfc14cf2028586dc1
hellopython/print_method/__init__.py
hellopython/print_method/__init__.py
import codecs import io import sys from workshopper.problems import BaseProblem class Problem(BaseProblem): def test(self, file): old_stdout = sys.stdout sys.stdout = io.StringIO() eval(codecs.open(file).read()) message = sys.stdout.getvalue() sys.stdout = old_stdout assert message == 'Hello World\n'
import codecs import io import sys from workshopper.problems import BaseProblem class Problem(BaseProblem): title = 'Print method' def test(self, file): old_stdout = sys.stdout sys.stdout = io.StringIO() eval(codecs.open(file).read()) message = sys.stdout.getvalue() sys.stdout = old_stdout assert message == 'Hello World\n'
Add a title to the print_method problem
Add a title to the print_method problem
Python
mit
pyschool/hipyschool
<REPLACE_OLD> Problem(BaseProblem): <REPLACE_NEW> Problem(BaseProblem): title = 'Print method' <REPLACE_END> <|endoftext|> import codecs import io import sys from workshopper.problems import BaseProblem class Problem(BaseProblem): title = 'Print method' def test(self, file): old_stdout = sys.stdout sys.stdout = io.StringIO() eval(codecs.open(file).read()) message = sys.stdout.getvalue() sys.stdout = old_stdout assert message == 'Hello World\n'
Add a title to the print_method problem import codecs import io import sys from workshopper.problems import BaseProblem class Problem(BaseProblem): def test(self, file): old_stdout = sys.stdout sys.stdout = io.StringIO() eval(codecs.open(file).read()) message = sys.stdout.getvalue() sys.stdout = old_stdout assert message == 'Hello World\n'
8ff8b9400adf24e082908befed7788099b01f328
bench/pact-suite/scripts/opcount_merge.py
bench/pact-suite/scripts/opcount_merge.py
#!/usr/bin/env python2.7 import sys files = sys.argv[1:] keys = set() fileVals = [] for file in files: vals = {} fileVals.append(vals) try: for line in open(file).readlines(): k, v = line.split() vals[k] = v keys.add(k) except Exception, e: print "Error in line \"%s\" of file %s" % (line, file) raise e for key in sorted(keys): sys.stdout.write(key) for vals in fileVals: sys.stdout.write("\t") sys.stdout.write(str(vals.get(key, 0))) sys.stdout.write("\n")
#!/usr/bin/env python2.7 import sys files = sys.argv[1:] keys = set() fileVals = [] for file in files: vals = {} fileVals.append(vals) try: for line in open(file).readlines(): toks = line.split() if len(toks) != 2: print >> sys.stderr, "Bad line: %s" % repr(toks) else: k, v = toks vals[k] = v keys.add(k) except Exception, e: print >> sys.stderr, "Error in line \"%s\" of file %s" % (line, file) raise e for key in sorted(keys): sys.stdout.write(key) for vals in fileVals: sys.stdout.write("\t") sys.stdout.write(str(vals.get(key, 0))) sys.stdout.write("\n")
Add initial data for operations counts
Add initial data for operations counts git-svn-id: 0c5512015aa96f7d3f5c3ad598bd98edc52008b1@12204 dc4e9af1-7f46-4ead-bba6-71afc04862de
Python
apache-2.0
basheersubei/swift-t,blue42u/swift-t,swift-lang/swift-t,JohnPJenkins/swift-t,swift-lang/swift-t,blue42u/swift-t,swift-lang/swift-t,swift-lang/swift-t,JohnPJenkins/swift-t,basheersubei/swift-t,JohnPJenkins/swift-t,JohnPJenkins/swift-t,JohnPJenkins/swift-t,basheersubei/swift-t,basheersubei/swift-t,basheersubei/swift-t,JohnPJenkins/swift-t,JohnPJenkins/swift-t,swift-lang/swift-t,basheersubei/swift-t,blue42u/swift-t,blue42u/swift-t,swift-lang/swift-t,blue42u/swift-t,swift-lang/swift-t,blue42u/swift-t,blue42u/swift-t,basheersubei/swift-t
<INSERT> toks = line.split() if len(toks) != 2: print >> sys.stderr, "Bad line: %s" % repr(toks) else: <INSERT_END> <REPLACE_OLD> line.split() <REPLACE_NEW> toks <REPLACE_END> <INSERT> <INSERT_END> <INSERT> >> sys.stderr, <INSERT_END> <|endoftext|> #!/usr/bin/env python2.7 import sys files = sys.argv[1:] keys = set() fileVals = [] for file in files: vals = {} fileVals.append(vals) try: for line in open(file).readlines(): toks = line.split() if len(toks) != 2: print >> sys.stderr, "Bad line: %s" % repr(toks) else: k, v = toks vals[k] = v keys.add(k) except Exception, e: print >> sys.stderr, "Error in line \"%s\" of file %s" % (line, file) raise e for key in sorted(keys): sys.stdout.write(key) for vals in fileVals: sys.stdout.write("\t") sys.stdout.write(str(vals.get(key, 0))) sys.stdout.write("\n")
Add initial data for operations counts git-svn-id: 0c5512015aa96f7d3f5c3ad598bd98edc52008b1@12204 dc4e9af1-7f46-4ead-bba6-71afc04862de #!/usr/bin/env python2.7 import sys files = sys.argv[1:] keys = set() fileVals = [] for file in files: vals = {} fileVals.append(vals) try: for line in open(file).readlines(): k, v = line.split() vals[k] = v keys.add(k) except Exception, e: print "Error in line \"%s\" of file %s" % (line, file) raise e for key in sorted(keys): sys.stdout.write(key) for vals in fileVals: sys.stdout.write("\t") sys.stdout.write(str(vals.get(key, 0))) sys.stdout.write("\n")
4d51fad87581281cd98d97a091346018d9784411
word2vec_api.py
word2vec_api.py
from flask import Flask, request from flask.ext.restful import reqparse, Api, Resource from gensim.models.word2vec import Word2Vec import json print 'loading model' MODEL = Word2Vec.load_word2vec_format( 'GoogleNews-vectors-negative300.bin.gz', binary=True) print 'model loaded' app = Flask(__name__) api = Api(app) class HelloWorld(Resource): def post(self): # reqparse didn't work, when a single item was passed in the negative # field It was splitting the string by character args = request.get_json() result = MODEL.most_similar( positive=args['positive'], negative=args['negative'], topn=args['topn'], ) return {'result': result}, 201 api.add_resource(HelloWorld, '/most_similar') if __name__ == '__main__': app.run(debug=True)
from flask import Flask, request from flask.ext.restful import reqparse, Api, Resource from gensim.models.word2vec import Word2Vec import json print 'loading model' MODEL = Word2Vec.load('GoogleNews-vectors-negative300.gensim') print 'model loaded' app = Flask(__name__) api = Api(app) class HelloWorld(Resource): def post(self): # reqparse didn't work, when a single item was passed in the negative # field It was splitting the string by character args = request.get_json() try: result = MODEL.most_similar( positive=args['positive'], negative=args['negative'], topn=args['topn'], ) except KeyError: return {'result': [("Sorry, I haven't learned that word yet", -1)]}, 201 return {'result': result}, 201 api.add_resource(HelloWorld, '/most_similar') if __name__ == '__main__': app.run(debug=False)
Update to use gensim format
api: Update to use gensim format
Python
mit
mdbecker/word2vec_demo,mdbecker/word2vec_demo
<REPLACE_OLD> Word2Vec.load_word2vec_format( 'GoogleNews-vectors-negative300.bin.gz', binary=True) print <REPLACE_NEW> Word2Vec.load('GoogleNews-vectors-negative300.gensim') print <REPLACE_END> <INSERT> try: <INSERT_END> <INSERT> <INSERT_END> <INSERT> <INSERT_END> <INSERT> <INSERT_END> <INSERT> <INSERT_END> <INSERT> except KeyError: return {'result': [("Sorry, I haven't learned that word yet", -1)]}, 201 <INSERT_END> <REPLACE_OLD> app.run(debug=True) <REPLACE_NEW> app.run(debug=False) <REPLACE_END> <|endoftext|> from flask import Flask, request from flask.ext.restful import reqparse, Api, Resource from gensim.models.word2vec import Word2Vec import json print 'loading model' MODEL = Word2Vec.load('GoogleNews-vectors-negative300.gensim') print 'model loaded' app = Flask(__name__) api = Api(app) class HelloWorld(Resource): def post(self): # reqparse didn't work, when a single item was passed in the negative # field It was splitting the string by character args = request.get_json() try: result = MODEL.most_similar( positive=args['positive'], negative=args['negative'], topn=args['topn'], ) except KeyError: return {'result': [("Sorry, I haven't learned that word yet", -1)]}, 201 return {'result': result}, 201 api.add_resource(HelloWorld, '/most_similar') if __name__ == '__main__': app.run(debug=False)
api: Update to use gensim format from flask import Flask, request from flask.ext.restful import reqparse, Api, Resource from gensim.models.word2vec import Word2Vec import json print 'loading model' MODEL = Word2Vec.load_word2vec_format( 'GoogleNews-vectors-negative300.bin.gz', binary=True) print 'model loaded' app = Flask(__name__) api = Api(app) class HelloWorld(Resource): def post(self): # reqparse didn't work, when a single item was passed in the negative # field It was splitting the string by character args = request.get_json() result = MODEL.most_similar( positive=args['positive'], negative=args['negative'], topn=args['topn'], ) return {'result': result}, 201 api.add_resource(HelloWorld, '/most_similar') if __name__ == '__main__': app.run(debug=True)
29a3e0ebf69531357d832d6014d15a3fe2f76682
setup.py
setup.py
from os.path import abspath, dirname, join from setuptools import find_packages, setup REQUIREMENTS = [ 'requests', 'six', 'websocket-client', ] HERE = dirname(abspath(__file__)) DESCRIPTION = '\n\n'.join(open(join(HERE, _), encoding="utf8").read() for _ in [ 'README.rst', 'CHANGES.rst', ]) setup( name='socketIO_client', version='0.6.3', description='A socket.io client library', long_description=DESCRIPTION, license='MIT', classifiers=[ 'Intended Audience :: Developers', 'Programming Language :: Python', 'License :: OSI Approved :: MIT License', ], keywords='socket.io node.js', author='Roy Hyunjin Han', author_email='rhh@crosscompute.com', url='https://github.com/invisibleroads/socketIO-client', install_requires=REQUIREMENTS, tests_require=[ 'nose', 'coverage', ], packages=find_packages(), include_package_data=True, zip_safe=False)
import io from os.path import abspath, dirname, join from setuptools import find_packages, setup REQUIREMENTS = [ 'requests', 'six', 'websocket-client', ] HERE = dirname(abspath(__file__)) LOAD_TEXT = lambda name: io.open(join(HERE, name), encoding='UTF-8').read() DESCRIPTION = '\n\n'.join(LOAD_TEXT(_) for _ in [ 'README.rst', 'CHANGES.rst', ]) setup( name='socketIO_client', version='0.6.3', description='A socket.io client library', long_description=DESCRIPTION, license='MIT', classifiers=[ 'Intended Audience :: Developers', 'Programming Language :: Python', 'License :: OSI Approved :: MIT License', ], keywords='socket.io node.js', author='Roy Hyunjin Han', author_email='rhh@crosscompute.com', url='https://github.com/invisibleroads/socketIO-client', install_requires=REQUIREMENTS, tests_require=[ 'nose', 'coverage', ], packages=find_packages(), include_package_data=True, zip_safe=False)
Support Python 2.6 & 2.7
Support Python 2.6 & 2.7
Python
mit
connor4312/socketIO-client,invisibleroads/socketIO-client,invisibleroads/socketIO-client,danielquinn/socketIO-client,feus4177/socketIO-client-2,connor4312/socketIO-client,feus4177/socketIO-client-2,invisibleroads/socketIO-client,danielquinn/socketIO-client,feus4177/socketIO-client-2,danielquinn/socketIO-client,connor4312/socketIO-client
<REPLACE_OLD> from <REPLACE_NEW> import io from <REPLACE_END> <REPLACE_OLD> dirname(abspath(__file__)) DESCRIPTION <REPLACE_NEW> dirname(abspath(__file__)) LOAD_TEXT <REPLACE_END> <REPLACE_OLD> '\n\n'.join(open(join(HERE, _), encoding="utf8").read() <REPLACE_NEW> lambda name: io.open(join(HERE, name), encoding='UTF-8').read() DESCRIPTION = '\n\n'.join(LOAD_TEXT(_) <REPLACE_END> <|endoftext|> import io from os.path import abspath, dirname, join from setuptools import find_packages, setup REQUIREMENTS = [ 'requests', 'six', 'websocket-client', ] HERE = dirname(abspath(__file__)) LOAD_TEXT = lambda name: io.open(join(HERE, name), encoding='UTF-8').read() DESCRIPTION = '\n\n'.join(LOAD_TEXT(_) for _ in [ 'README.rst', 'CHANGES.rst', ]) setup( name='socketIO_client', version='0.6.3', description='A socket.io client library', long_description=DESCRIPTION, license='MIT', classifiers=[ 'Intended Audience :: Developers', 'Programming Language :: Python', 'License :: OSI Approved :: MIT License', ], keywords='socket.io node.js', author='Roy Hyunjin Han', author_email='rhh@crosscompute.com', url='https://github.com/invisibleroads/socketIO-client', install_requires=REQUIREMENTS, tests_require=[ 'nose', 'coverage', ], packages=find_packages(), include_package_data=True, zip_safe=False)
Support Python 2.6 & 2.7 from os.path import abspath, dirname, join from setuptools import find_packages, setup REQUIREMENTS = [ 'requests', 'six', 'websocket-client', ] HERE = dirname(abspath(__file__)) DESCRIPTION = '\n\n'.join(open(join(HERE, _), encoding="utf8").read() for _ in [ 'README.rst', 'CHANGES.rst', ]) setup( name='socketIO_client', version='0.6.3', description='A socket.io client library', long_description=DESCRIPTION, license='MIT', classifiers=[ 'Intended Audience :: Developers', 'Programming Language :: Python', 'License :: OSI Approved :: MIT License', ], keywords='socket.io node.js', author='Roy Hyunjin Han', author_email='rhh@crosscompute.com', url='https://github.com/invisibleroads/socketIO-client', install_requires=REQUIREMENTS, tests_require=[ 'nose', 'coverage', ], packages=find_packages(), include_package_data=True, zip_safe=False)
a973f489d265c0f4f5baa3357eaf5a772a825821
tweetstream.py
tweetstream.py
# -*- coding: utf-8 -*- import os from tweepy import Stream from tweepy import OAuthHandler from tweepy import API from tweepy.streaming import StreamListener from listener import Listener ckey = os.environ['CKEY'] consumer_secret = os.environ['CONSUMER_KEY'] access_token_key = os.environ['ACCESS_TOKEN_KEY'] access_token_secret = os.environ['ACCESS_TOKEN_SECRET'] keywords = [ u"كيماوي", u"غاز سام", u"كلور", u"اختناق", u"سام", u"غازات سامة", u"الكلور", u"الكيماوي", u"الاختناق", u"الغازات السامة", u"السام" ] def call(): auth = OAuthHandler(ckey, consumer_secret) auth.set_access_token(access_token_key, access_token_secret) print "Connecting to Twitter Streaming API..." api = API(auth) print "Done." # initialize Stream object twitterStream = Stream(auth, Listener(api)) # call filter on Stream object twitterStream.filter(track=keywords, languages=["ar"])
# -*- coding: utf-8 -*- import os from tweepy import Stream from tweepy import OAuthHandler from tweepy import API from tweepy.streaming import StreamListener from listener import Listener ckey = os.environ['CKEY'] consumer_secret = os.environ['CONSUMER_SECRET'] access_token_key = os.environ['ACCESS_TOKEN_KEY'] access_token_secret = os.environ['ACCESS_TOKEN_SECRET'] keywords = [ u"كيماوي", u"غاز سام", u"كلور", u"اختناق", u"سام", u"غازات سامة", u"الكلور", u"الكيماوي", u"الاختناق", u"الغازات السامة", u"السام" ] def call(): auth = OAuthHandler(ckey, consumer_secret) auth.set_access_token(access_token_key, access_token_secret) print "Connecting to Twitter Streaming API..." api = API(auth) print "Done." # initialize Stream object twitterStream = Stream(auth, Listener(api)) # call filter on Stream object twitterStream.filter(track=keywords, languages=["ar"])
Use the correct env variable name
Use the correct env variable name
Python
mit
robot-overlord/syriarightnow
<REPLACE_OLD> os.environ['CONSUMER_KEY'] access_token_key <REPLACE_NEW> os.environ['CONSUMER_SECRET'] access_token_key <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- import os from tweepy import Stream from tweepy import OAuthHandler from tweepy import API from tweepy.streaming import StreamListener from listener import Listener ckey = os.environ['CKEY'] consumer_secret = os.environ['CONSUMER_SECRET'] access_token_key = os.environ['ACCESS_TOKEN_KEY'] access_token_secret = os.environ['ACCESS_TOKEN_SECRET'] keywords = [ u"كيماوي", u"غاز سام", u"كلور", u"اختناق", u"سام", u"غازات سامة", u"الكلور", u"الكيماوي", u"الاختناق", u"الغازات السامة", u"السام" ] def call(): auth = OAuthHandler(ckey, consumer_secret) auth.set_access_token(access_token_key, access_token_secret) print "Connecting to Twitter Streaming API..." api = API(auth) print "Done." # initialize Stream object twitterStream = Stream(auth, Listener(api)) # call filter on Stream object twitterStream.filter(track=keywords, languages=["ar"])
Use the correct env variable name # -*- coding: utf-8 -*- import os from tweepy import Stream from tweepy import OAuthHandler from tweepy import API from tweepy.streaming import StreamListener from listener import Listener ckey = os.environ['CKEY'] consumer_secret = os.environ['CONSUMER_KEY'] access_token_key = os.environ['ACCESS_TOKEN_KEY'] access_token_secret = os.environ['ACCESS_TOKEN_SECRET'] keywords = [ u"كيماوي", u"غاز سام", u"كلور", u"اختناق", u"سام", u"غازات سامة", u"الكلور", u"الكيماوي", u"الاختناق", u"الغازات السامة", u"السام" ] def call(): auth = OAuthHandler(ckey, consumer_secret) auth.set_access_token(access_token_key, access_token_secret) print "Connecting to Twitter Streaming API..." api = API(auth) print "Done." # initialize Stream object twitterStream = Stream(auth, Listener(api)) # call filter on Stream object twitterStream.filter(track=keywords, languages=["ar"])
96439cb26a09158f112541025a6c2901b983eae9
tests/test_pay_onetime.py
tests/test_pay_onetime.py
# -*- coding: utf-8 -*- def test_pay_onetime(iamport): # Without 'card_number' payload_notEnough = { 'merchant_uid': 'qwer1234', 'amount': 5000, 'expiry': '2019-03', 'birth': '500203', 'pwd_2digit': '19' } try: iamport.pay_onetime(**payload_notEnough) except KeyError as e: assert "Essential parameter is missing!: card_number" in str(e) payload_full = { 'merchant_uid': 'qwer1234', 'amount': 5000, 'card_number': '4092-0230-1234-1234', 'expiry': '2019-03', 'birth': '500203', 'pwd_2digit': '19' } try: iamport.pay_onetime(**payload_full) except iamport.ResponseError as e: assert e.code == -1 assert u'카드정보 인증에 실패하였습니다.' in e.message
# -*- coding: utf-8 -*- import string, random def test_pay_onetime(iamport): merchant_uid = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(10) ) # Without 'card_number' payload_not_enough = { 'merchant_uid': merchant_uid, 'amount': 5000, 'expiry': '2019-03', 'birth': '500203', 'pwd_2digit': '19' } try: iamport.pay_onetime(**payload_not_enough) except KeyError as e: assert "Essential parameter is missing!: card_number" in str(e) merchant_uid = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(10) ) payload_full = { 'merchant_uid': merchant_uid, 'amount': 5000, 'card_number': '4092-0230-1234-1234', 'expiry': '2019-03', 'birth': '500203', 'pwd_2digit': '19' } try: iamport.pay_onetime(**payload_full) except iamport.ResponseError as e: assert e.code == -1 assert u'카드정보 인증에 실패하였습니다.' in e.message
Add random merchant_uid for continuous testing
Add random merchant_uid for continuous testing
Python
mit
iamport/iamport-rest-client-python
<REPLACE_OLD> -*- def <REPLACE_NEW> -*- import string, random def <REPLACE_END> <INSERT> merchant_uid = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(10) ) <INSERT_END> <REPLACE_OLD> payload_notEnough <REPLACE_NEW> payload_not_enough <REPLACE_END> <REPLACE_OLD> 'qwer1234', <REPLACE_NEW> merchant_uid, <REPLACE_END> <REPLACE_OLD> iamport.pay_onetime(**payload_notEnough) <REPLACE_NEW> iamport.pay_onetime(**payload_not_enough) <REPLACE_END> <INSERT> merchant_uid = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(10) ) <INSERT_END> <REPLACE_OLD> 'qwer1234', <REPLACE_NEW> merchant_uid, <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- import string, random def test_pay_onetime(iamport): merchant_uid = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(10) ) # Without 'card_number' payload_not_enough = { 'merchant_uid': merchant_uid, 'amount': 5000, 'expiry': '2019-03', 'birth': '500203', 'pwd_2digit': '19' } try: iamport.pay_onetime(**payload_not_enough) except KeyError as e: assert "Essential parameter is missing!: card_number" in str(e) merchant_uid = ''.join( random.choice(string.ascii_uppercase + string.digits) for _ in range(10) ) payload_full = { 'merchant_uid': merchant_uid, 'amount': 5000, 'card_number': '4092-0230-1234-1234', 'expiry': '2019-03', 'birth': '500203', 'pwd_2digit': '19' } try: iamport.pay_onetime(**payload_full) except iamport.ResponseError as e: assert e.code == -1 assert u'카드정보 인증에 실패하였습니다.' in e.message
Add random merchant_uid for continuous testing # -*- coding: utf-8 -*- def test_pay_onetime(iamport): # Without 'card_number' payload_notEnough = { 'merchant_uid': 'qwer1234', 'amount': 5000, 'expiry': '2019-03', 'birth': '500203', 'pwd_2digit': '19' } try: iamport.pay_onetime(**payload_notEnough) except KeyError as e: assert "Essential parameter is missing!: card_number" in str(e) payload_full = { 'merchant_uid': 'qwer1234', 'amount': 5000, 'card_number': '4092-0230-1234-1234', 'expiry': '2019-03', 'birth': '500203', 'pwd_2digit': '19' } try: iamport.pay_onetime(**payload_full) except iamport.ResponseError as e: assert e.code == -1 assert u'카드정보 인증에 실패하였습니다.' in e.message
c0f06b64c15d74be26be2cd9e6d593e5c5cae2a9
tools/xml_select_minmax.py
tools/xml_select_minmax.py
#! /usr/bin/python3 import sys import argparse import xml_utils as u import os from argparse import RawTextHelpFormatter ##---------------------------------------------------------- ## for each label that has more than the mininum count, select the ## largest subset less than the maxinum count. ## writes out to a new xml file. ##---------------------------------------------------------- def main (argv) : parser = argparse.ArgumentParser(description='Select a subset if label count falls between min and max.', formatter_class=RawTextHelpFormatter) parser.add_argument ('image_db') parser.add_argument ('min', default=0) parser.add_argument ('max', default=0) parser.add_argument ('files', nargs='+') parser.add_argument ('-o', '-out', '--output') parser.add_argument ('-filetype', '--filetype', default="chips", help='Type of input file <images|faces|chips>. Defaults to "chips".') parser.add_argument ('-v', '--verbosity', type=int, default=1, choices=[0, 1, 2, 3], help='') # help="increase output verbosity" u.set_argv (argv) args = parser.parse_args() u.set_verbosity (args.verbosity) u.set_argv (argv) u.set_filetype ('faces') verbose = 0 if verbose > 0: print("files: ", args.files) filetypes = ['images', 'chips', 'faces'] filetype = args.filetype if filetype not in filetypes : print('unrecognized filetype :', filetype, 'should be one of:', filetypes) return output_file = 'selected_' + u.current_datetime () + '.xml' if args.output : # user specified if not os.path.exists (args.output) : output_file = args.output else : print ('output file exists, writing to', output_file) u.select_labels_minmax (args.files, args.image_db, args.min, args.max, output_file, filetype) if __name__ == "__main__": main (sys.argv)
Select a subset for each label. If the label image count is greater than min, return the greatest number of images less than or equal to the max. The images will be evenly distributed across years, seasons and days.
Select a subset for each label. If the label image count is greater than min, return the greatest number of images less than or equal to the max. The images will be evenly distributed across years, seasons and days.
Python
mit
hypraptive/bearid,hypraptive/bearid,hypraptive/bearid
<INSERT> #! /usr/bin/python3 import sys import argparse import xml_utils as u import os from argparse import RawTextHelpFormatter ##---------------------------------------------------------- ## <INSERT_END> <INSERT> for each label that has more than the mininum count, select the ## largest subset less than the maxinum count. ## writes out to a new xml file. ##---------------------------------------------------------- def main (argv) : parser = argparse.ArgumentParser(description='Select a subset if label count falls between min and max.', formatter_class=RawTextHelpFormatter) parser.add_argument ('image_db') parser.add_argument ('min', default=0) parser.add_argument ('max', default=0) parser.add_argument ('files', nargs='+') parser.add_argument ('-o', '-out', '--output') parser.add_argument ('-filetype', '--filetype', default="chips", help='Type of input file <images|faces|chips>. Defaults to "chips".') parser.add_argument ('-v', '--verbosity', type=int, default=1, choices=[0, 1, 2, 3], help='') # help="increase output verbosity" u.set_argv (argv) args = parser.parse_args() u.set_verbosity (args.verbosity) u.set_argv (argv) u.set_filetype ('faces') verbose = 0 if verbose > 0: print("files: ", args.files) filetypes = ['images', 'chips', 'faces'] filetype = args.filetype if filetype not in filetypes : print('unrecognized filetype :', filetype, 'should be one of:', filetypes) return output_file = 'selected_' + u.current_datetime () + '.xml' if args.output : # user specified if not os.path.exists (args.output) : output_file = args.output else : print ('output file exists, writing to', output_file) u.select_labels_minmax (args.files, args.image_db, args.min, args.max, output_file, filetype) if __name__ == "__main__": main (sys.argv) <INSERT_END> <|endoftext|> #! /usr/bin/python3 import sys import argparse import xml_utils as u import os from argparse import RawTextHelpFormatter ##---------------------------------------------------------- ## for each label that has more than the mininum count, select the ## largest subset less than the maxinum count. ## writes out to a new xml file. ##---------------------------------------------------------- def main (argv) : parser = argparse.ArgumentParser(description='Select a subset if label count falls between min and max.', formatter_class=RawTextHelpFormatter) parser.add_argument ('image_db') parser.add_argument ('min', default=0) parser.add_argument ('max', default=0) parser.add_argument ('files', nargs='+') parser.add_argument ('-o', '-out', '--output') parser.add_argument ('-filetype', '--filetype', default="chips", help='Type of input file <images|faces|chips>. Defaults to "chips".') parser.add_argument ('-v', '--verbosity', type=int, default=1, choices=[0, 1, 2, 3], help='') # help="increase output verbosity" u.set_argv (argv) args = parser.parse_args() u.set_verbosity (args.verbosity) u.set_argv (argv) u.set_filetype ('faces') verbose = 0 if verbose > 0: print("files: ", args.files) filetypes = ['images', 'chips', 'faces'] filetype = args.filetype if filetype not in filetypes : print('unrecognized filetype :', filetype, 'should be one of:', filetypes) return output_file = 'selected_' + u.current_datetime () + '.xml' if args.output : # user specified if not os.path.exists (args.output) : output_file = args.output else : print ('output file exists, writing to', output_file) u.select_labels_minmax (args.files, args.image_db, args.min, args.max, output_file, filetype) if __name__ == "__main__": main (sys.argv)
Select a subset for each label. If the label image count is greater than min, return the greatest number of images less than or equal to the max. The images will be evenly distributed across years, seasons and days.
99909048bc702e21e980bb1167caf9217aa31196
steel/fields/strings.py
steel/fields/strings.py
import codecs from steel.fields import Field from steel.fields.mixin import Fixed __all__ = ['Bytes', 'String', 'FixedBytes', 'FixedString'] class Bytes(Field): "A stream of bytes that should be left unconverted" def encode(self, value): # Nothing to do here return value def decode(self, value): # Nothing to do here return value class String(Field): "A string that gets converted using a specified encoding" def __init__(self, *args, encoding, **kwargs): # Bail out early if the encoding isn't valid codecs.lookup(encoding) self.encoding = encoding super(String, self).__init__(*args, **kwargs) def encode(self, value): return value.encode(self.encoding) def decode(self, value): return value.decode(self.encoding) class FixedBytes(Fixed, Bytes): "A stream of bytes that will always be set to the same value" # The mixin does the heavy lifting pass class FixedString(Fixed, String): "A stream of bytes that will always be set to the same value" # The mixin does the heavy lifting pass
import codecs from steel.fields import Field from steel.fields.mixin import Fixed __all__ = ['Bytes', 'String', 'FixedBytes', 'FixedString'] class Bytes(Field): "A stream of bytes that should be left unconverted" def encode(self, value): # Nothing to do here return value def decode(self, value): # Nothing to do here return value class String(Field): "A string that gets converted using a specified encoding" def __init__(self, *args, encoding, **kwargs): # Bail out early if the encoding isn't valid codecs.lookup(encoding) self.encoding = encoding super(String, self).__init__(*args, **kwargs) def encode(self, value): return value.encode(self.encoding) def decode(self, value): return value.decode(self.encoding) class FixedBytes(Fixed, Bytes): "A stream of bytes that will always be set to the same value" # The mixin does the heavy lifting pass class FixedString(Fixed, String): "A string that will always be set to the same value" # The mixin does the heavy lifting pass
Fix the docstring for FixedString
Fix the docstring for FixedString
Python
bsd-3-clause
gulopine/steel-experiment
<REPLACE_OLD> stream of bytes <REPLACE_NEW> string <REPLACE_END> <|endoftext|> import codecs from steel.fields import Field from steel.fields.mixin import Fixed __all__ = ['Bytes', 'String', 'FixedBytes', 'FixedString'] class Bytes(Field): "A stream of bytes that should be left unconverted" def encode(self, value): # Nothing to do here return value def decode(self, value): # Nothing to do here return value class String(Field): "A string that gets converted using a specified encoding" def __init__(self, *args, encoding, **kwargs): # Bail out early if the encoding isn't valid codecs.lookup(encoding) self.encoding = encoding super(String, self).__init__(*args, **kwargs) def encode(self, value): return value.encode(self.encoding) def decode(self, value): return value.decode(self.encoding) class FixedBytes(Fixed, Bytes): "A stream of bytes that will always be set to the same value" # The mixin does the heavy lifting pass class FixedString(Fixed, String): "A string that will always be set to the same value" # The mixin does the heavy lifting pass
Fix the docstring for FixedString import codecs from steel.fields import Field from steel.fields.mixin import Fixed __all__ = ['Bytes', 'String', 'FixedBytes', 'FixedString'] class Bytes(Field): "A stream of bytes that should be left unconverted" def encode(self, value): # Nothing to do here return value def decode(self, value): # Nothing to do here return value class String(Field): "A string that gets converted using a specified encoding" def __init__(self, *args, encoding, **kwargs): # Bail out early if the encoding isn't valid codecs.lookup(encoding) self.encoding = encoding super(String, self).__init__(*args, **kwargs) def encode(self, value): return value.encode(self.encoding) def decode(self, value): return value.decode(self.encoding) class FixedBytes(Fixed, Bytes): "A stream of bytes that will always be set to the same value" # The mixin does the heavy lifting pass class FixedString(Fixed, String): "A stream of bytes that will always be set to the same value" # The mixin does the heavy lifting pass
5782fc5cb505ff6af8d20411004a05ad53e82b90
fabfile/testbeds/testbed_mlab.py
fabfile/testbeds/testbed_mlab.py
from fabric.api import env #Management ip addresses of hosts in the cluster host1 = 'root@10.84.19.42' host2 = 'root@10.84.19.43' host3 = 'root@10.84.19.44' host4 = 'root@10.84.19.45' host5 = 'root@10.84.19.46' #External routers if any #for eg. #ext_routers = [('mx1', '10.204.216.253')] ext_routers = [] #Autonomous system number router_asn = 64512 #Host from which the fab commands are triggered to install and provision host_build = 'root@10.84.5.31' #Role definition of the hosts. env.roledefs = { 'all': [host1, host2, host3, host4, host5], 'cfgm': [host1], 'openstack': [host1], 'control': [host2, host3], 'compute': [host4, host5], 'collector': [host1, host2, host3], 'webui': [host1, host2, host3], 'database': [host1, host2, host3], 'build': [host_build], } env.hostnames = { 'all': ['z0', 'z1', 'z2','10.84.19.45', '10.84.19.46'] } #Openstack admin password env.openstack_admin_password = 'chei9APh' env.password = 'c0ntrail123' #Passwords of each host env.passwords = { host1: 'c0ntrail123', host2: 'c0ntrail123', host3: 'c0ntrail123', host4: 'c0ntrail123', host5: 'c0ntrail123', host_build: 'c0ntrail123', } #For reimage purpose env.ostypes = { host1: 'ubuntu', host2: 'ubuntu', host3: 'ubuntu', host4: 'ubuntu', host5: 'ubuntu', } env.test_repo_dir='/root/contrail-sanity/contrail-test'
Add testbed file for mlab
Add testbed file for mlab
Python
apache-2.0
Juniper/contrail-fabric-utils,Juniper/contrail-fabric-utils
<INSERT> from fabric.api import env #Management ip addresses of hosts in the cluster host1 = 'root@10.84.19.42' host2 = 'root@10.84.19.43' host3 = 'root@10.84.19.44' host4 = 'root@10.84.19.45' host5 = 'root@10.84.19.46' #External routers if any #for eg. #ext_routers = [('mx1', '10.204.216.253')] ext_routers = [] #Autonomous system number router_asn = 64512 #Host from which the fab commands are triggered to install and provision host_build = 'root@10.84.5.31' #Role definition of the hosts. env.roledefs = { <INSERT_END> <INSERT> 'all': [host1, host2, host3, host4, host5], 'cfgm': [host1], 'openstack': [host1], 'control': [host2, host3], 'compute': [host4, host5], 'collector': [host1, host2, host3], 'webui': [host1, host2, host3], 'database': [host1, host2, host3], 'build': [host_build], } env.hostnames = { 'all': ['z0', 'z1', 'z2','10.84.19.45', '10.84.19.46'] } #Openstack admin password env.openstack_admin_password = 'chei9APh' env.password = 'c0ntrail123' #Passwords of each host env.passwords = { host1: 'c0ntrail123', host2: 'c0ntrail123', host3: 'c0ntrail123', host4: 'c0ntrail123', host5: 'c0ntrail123', host_build: 'c0ntrail123', } #For reimage purpose env.ostypes = { host1: 'ubuntu', host2: 'ubuntu', host3: 'ubuntu', host4: 'ubuntu', host5: 'ubuntu', } env.test_repo_dir='/root/contrail-sanity/contrail-test' <INSERT_END> <|endoftext|> from fabric.api import env #Management ip addresses of hosts in the cluster host1 = 'root@10.84.19.42' host2 = 'root@10.84.19.43' host3 = 'root@10.84.19.44' host4 = 'root@10.84.19.45' host5 = 'root@10.84.19.46' #External routers if any #for eg. #ext_routers = [('mx1', '10.204.216.253')] ext_routers = [] #Autonomous system number router_asn = 64512 #Host from which the fab commands are triggered to install and provision host_build = 'root@10.84.5.31' #Role definition of the hosts. env.roledefs = { 'all': [host1, host2, host3, host4, host5], 'cfgm': [host1], 'openstack': [host1], 'control': [host2, host3], 'compute': [host4, host5], 'collector': [host1, host2, host3], 'webui': [host1, host2, host3], 'database': [host1, host2, host3], 'build': [host_build], } env.hostnames = { 'all': ['z0', 'z1', 'z2','10.84.19.45', '10.84.19.46'] } #Openstack admin password env.openstack_admin_password = 'chei9APh' env.password = 'c0ntrail123' #Passwords of each host env.passwords = { host1: 'c0ntrail123', host2: 'c0ntrail123', host3: 'c0ntrail123', host4: 'c0ntrail123', host5: 'c0ntrail123', host_build: 'c0ntrail123', } #For reimage purpose env.ostypes = { host1: 'ubuntu', host2: 'ubuntu', host3: 'ubuntu', host4: 'ubuntu', host5: 'ubuntu', } env.test_repo_dir='/root/contrail-sanity/contrail-test'
Add testbed file for mlab
fb59f2e0bd01d75c90ea3cc0089c24fc5db86e8e
config/jupyter/jupyter_notebook_config.py
config/jupyter/jupyter_notebook_config.py
import sys sys.path.append('/root/.jupyter/extensions/') c.JupyterApp.ip = '*' c.JupyterApp.port = 80 c.JupyterApp.open_browser = False c.JupyterApp.allow_credentials = True c.JupyterApp.nbserver_extensions = ['jupyter_nbgallery.status', 'jupyter_nbgallery.post'] c.JupyterApp.reraise_server_extension_failures = True c.JupyterApp.extra_static_paths = ['/root/.jupyter/static'] c.JupyterApp.extra_nbextensions_path = ['/root/.jupyter/extensions/'] c.JupyterApp.tornado_settings = {'static_url_prefix': '/Jupyter/static/'} c.JupyterApp.allow_origin = 'https://nb.gallery' # needed to receive notebooks from the gallery c.JupyterApp.disable_check_xsrf = True
import json import os import sys sys.path.append('/root/.jupyter/extensions/') c.JupyterApp.ip = '*' c.JupyterApp.port = 80 c.JupyterApp.open_browser = False c.JupyterApp.allow_credentials = True c.JupyterApp.nbserver_extensions = ['jupyter_nbgallery.status', 'jupyter_nbgallery.post'] c.JupyterApp.reraise_server_extension_failures = True c.JupyterApp.extra_static_paths = ['/root/.jupyter/static'] c.JupyterApp.extra_nbextensions_path = ['/root/.jupyter/extensions/'] c.JupyterApp.tornado_settings = {'static_url_prefix': '/Jupyter/static/'} c.JupyterApp.allow_origin = 'https://nb.gallery' # needed to receive notebooks from the gallery c.JupyterApp.disable_check_xsrf = True # Override gallery location nbgallery_url = os.getenv('NBGALLERY_URL') if nbgallery_url: print('Setting nbgallery url to %s' % nbgallery_url) c.JupyterApp.allow_origin = nbgallery_url config = json.loads(open('/root/.jupyter/nbconfig/common.json').read()) config['nbgallery']['url'] = nbgallery_url with open('/root/.jupyter/nbconfig/common.json', 'w') as output: output.write(json.dumps(config, indent=2))
Set $NBGALLERY_URL to override gallery location
Set $NBGALLERY_URL to override gallery location
Python
mit
jupyter-gallery/jupyter-docker,jupyter-gallery/jupyter-docker,jupyter-gallery/jupyter-docker
<INSERT> json import os import <INSERT_END> <REPLACE_OLD> True <REPLACE_NEW> True # Override gallery location nbgallery_url = os.getenv('NBGALLERY_URL') if nbgallery_url: print('Setting nbgallery url to %s' % nbgallery_url) c.JupyterApp.allow_origin = nbgallery_url config = json.loads(open('/root/.jupyter/nbconfig/common.json').read()) config['nbgallery']['url'] = nbgallery_url with open('/root/.jupyter/nbconfig/common.json', 'w') as output: output.write(json.dumps(config, indent=2)) <REPLACE_END> <|endoftext|> import json import os import sys sys.path.append('/root/.jupyter/extensions/') c.JupyterApp.ip = '*' c.JupyterApp.port = 80 c.JupyterApp.open_browser = False c.JupyterApp.allow_credentials = True c.JupyterApp.nbserver_extensions = ['jupyter_nbgallery.status', 'jupyter_nbgallery.post'] c.JupyterApp.reraise_server_extension_failures = True c.JupyterApp.extra_static_paths = ['/root/.jupyter/static'] c.JupyterApp.extra_nbextensions_path = ['/root/.jupyter/extensions/'] c.JupyterApp.tornado_settings = {'static_url_prefix': '/Jupyter/static/'} c.JupyterApp.allow_origin = 'https://nb.gallery' # needed to receive notebooks from the gallery c.JupyterApp.disable_check_xsrf = True # Override gallery location nbgallery_url = os.getenv('NBGALLERY_URL') if nbgallery_url: print('Setting nbgallery url to %s' % nbgallery_url) c.JupyterApp.allow_origin = nbgallery_url config = json.loads(open('/root/.jupyter/nbconfig/common.json').read()) config['nbgallery']['url'] = nbgallery_url with open('/root/.jupyter/nbconfig/common.json', 'w') as output: output.write(json.dumps(config, indent=2))
Set $NBGALLERY_URL to override gallery location import sys sys.path.append('/root/.jupyter/extensions/') c.JupyterApp.ip = '*' c.JupyterApp.port = 80 c.JupyterApp.open_browser = False c.JupyterApp.allow_credentials = True c.JupyterApp.nbserver_extensions = ['jupyter_nbgallery.status', 'jupyter_nbgallery.post'] c.JupyterApp.reraise_server_extension_failures = True c.JupyterApp.extra_static_paths = ['/root/.jupyter/static'] c.JupyterApp.extra_nbextensions_path = ['/root/.jupyter/extensions/'] c.JupyterApp.tornado_settings = {'static_url_prefix': '/Jupyter/static/'} c.JupyterApp.allow_origin = 'https://nb.gallery' # needed to receive notebooks from the gallery c.JupyterApp.disable_check_xsrf = True
b1c02240b8c2b291d43c70100c517a6038a91de4
tests/dags/test_external_task_sensor_fn_multiple_execution_dates_dags.py
tests/dags/test_external_task_sensor_fn_multiple_execution_dates_dags.py
from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.dummy_operator import DummyOperator from airflow.operators.sensors import ExternalTaskSensor from tests.operators.sensors import TEST_DAG_ID, DEFAULT_DATE from datetime import datetime, timedelta args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} bash_command_code = """ {% set s=execution_date.time().second %} echo "second is {{ s }}" if [[ $(( {{ s }} % 60 )) == 1 ]] then exit 1 fi exit 0 """ # That DAG is use to test the behavior of the ExternalTaskSensor # when depending on several runs of an external task. # test_without_failure should not fail, leading to # test_external_task_sensor_multiple_dates_with_failure # to succeed, whereas test_with_failure should fail once # per minute (the DAG runs every second) leading to # test_external_task_sensor_multiple_dates_with_failure # to fail (because of timeout). dag_external_id = TEST_DAG_ID + '_secondly_external' dag_secondly_external = DAG(dag_external_id, default_args=args, schedule_interval=timedelta(seconds=1)) dag_secondly_external.add_task(BashOperator( task_id="test_with_failure", bash_command=bash_command_code, retries=0, depends_on_past=False, start_date=DEFAULT_DATE)) dag_secondly_external.add_task(DummyOperator( task_id="test_without_failure", retries=0, depends_on_past=False, start_date=DEFAULT_DATE)) dag_id = TEST_DAG_ID + '_minutely' dag_minutely = DAG(dag_id, default_args=args, schedule_interval=timedelta(minutes=1)) dag_minutely.add_task(ExternalTaskSensor( task_id='test_external_task_sensor_multiple_dates_without_failure', external_dag_id=dag_external_id, external_task_id='test_without_failure', execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)], allowed_states=['success'], retries=0, timeout=1, poke_interval=1, depends_on_past=False, start_date=DEFAULT_DATE)) dag_minutely.add_task(ExternalTaskSensor( task_id='test_external_task_sensor_multiple_dates_with_failure', external_dag_id=dag_external_id, external_task_id='test_with_failure', execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)], allowed_states=['success'], retries=0, depends_on_past=False, timeout=1, poke_interval=1, start_date=DEFAULT_DATE))
Add ability for ExternalTaskSensor to wait on multiple runs of a task
Add ability for ExternalTaskSensor to wait on multiple runs of a task Currently using the execution_date_fn parameter of the ExternalTaskSensor sensors only allows waiting for the completion of one given run of the task the ExternalTaskSensor is sensing. However, this prevents users from having setups where dags don't have the same schedule frequency but still depend on one another. For example, let's say you have a dag scheduled hourly that transforms log data and is owned by the team in charge of logging. In the current setup you cannot have other higher level teams, which want to use this transformed data, create dags processing transformed log data in daily batches, while making sure the logged transformed data was properly created. Note that simply waiting for the data to be present (using e.g. the HivePartitionSensor if the data is in hive) might not be satisfactory because the data being present doesn't mean it is ready to be used. This commit adds the ability to do exactly that by being able to have an ExternalTaskSensor wait for multiple runs of the task it is sensing to have finished. Now higher level teams can set up dags with an ExternalTaskSensor sensing the end task of the dag that transforms the log data and waiting for the successful completion of 24 of its hourly runs.
Python
apache-2.0
yati-sagade/incubator-airflow,yati-sagade/incubator-airflow,yati-sagade/incubator-airflow,yati-sagade/incubator-airflow
<REPLACE_OLD> <REPLACE_NEW> from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.dummy_operator import DummyOperator from airflow.operators.sensors import ExternalTaskSensor from tests.operators.sensors import TEST_DAG_ID, DEFAULT_DATE from datetime import datetime, timedelta args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} bash_command_code = """ {% set s=execution_date.time().second %} echo "second is {{ s }}" if [[ $(( {{ s }} % 60 )) == 1 ]] then exit 1 fi exit 0 """ # That DAG is use to test the behavior of the ExternalTaskSensor # when depending on several runs of an external task. # test_without_failure should not fail, leading to # test_external_task_sensor_multiple_dates_with_failure # to succeed, whereas test_with_failure should fail once # per minute (the DAG runs every second) leading to # test_external_task_sensor_multiple_dates_with_failure # to fail (because of timeout). dag_external_id = TEST_DAG_ID + '_secondly_external' dag_secondly_external = DAG(dag_external_id, default_args=args, schedule_interval=timedelta(seconds=1)) dag_secondly_external.add_task(BashOperator( task_id="test_with_failure", bash_command=bash_command_code, retries=0, depends_on_past=False, start_date=DEFAULT_DATE)) dag_secondly_external.add_task(DummyOperator( task_id="test_without_failure", retries=0, depends_on_past=False, start_date=DEFAULT_DATE)) dag_id = TEST_DAG_ID + '_minutely' dag_minutely = DAG(dag_id, default_args=args, schedule_interval=timedelta(minutes=1)) dag_minutely.add_task(ExternalTaskSensor( task_id='test_external_task_sensor_multiple_dates_without_failure', external_dag_id=dag_external_id, external_task_id='test_without_failure', execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)], allowed_states=['success'], retries=0, timeout=1, poke_interval=1, depends_on_past=False, start_date=DEFAULT_DATE)) dag_minutely.add_task(ExternalTaskSensor( task_id='test_external_task_sensor_multiple_dates_with_failure', external_dag_id=dag_external_id, external_task_id='test_with_failure', execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)], allowed_states=['success'], retries=0, depends_on_past=False, timeout=1, poke_interval=1, start_date=DEFAULT_DATE)) <REPLACE_END> <|endoftext|> from airflow import DAG from airflow.operators.bash_operator import BashOperator from airflow.operators.dummy_operator import DummyOperator from airflow.operators.sensors import ExternalTaskSensor from tests.operators.sensors import TEST_DAG_ID, DEFAULT_DATE from datetime import datetime, timedelta args = {'owner': 'airflow', 'start_date': DEFAULT_DATE} bash_command_code = """ {% set s=execution_date.time().second %} echo "second is {{ s }}" if [[ $(( {{ s }} % 60 )) == 1 ]] then exit 1 fi exit 0 """ # That DAG is use to test the behavior of the ExternalTaskSensor # when depending on several runs of an external task. # test_without_failure should not fail, leading to # test_external_task_sensor_multiple_dates_with_failure # to succeed, whereas test_with_failure should fail once # per minute (the DAG runs every second) leading to # test_external_task_sensor_multiple_dates_with_failure # to fail (because of timeout). dag_external_id = TEST_DAG_ID + '_secondly_external' dag_secondly_external = DAG(dag_external_id, default_args=args, schedule_interval=timedelta(seconds=1)) dag_secondly_external.add_task(BashOperator( task_id="test_with_failure", bash_command=bash_command_code, retries=0, depends_on_past=False, start_date=DEFAULT_DATE)) dag_secondly_external.add_task(DummyOperator( task_id="test_without_failure", retries=0, depends_on_past=False, start_date=DEFAULT_DATE)) dag_id = TEST_DAG_ID + '_minutely' dag_minutely = DAG(dag_id, default_args=args, schedule_interval=timedelta(minutes=1)) dag_minutely.add_task(ExternalTaskSensor( task_id='test_external_task_sensor_multiple_dates_without_failure', external_dag_id=dag_external_id, external_task_id='test_without_failure', execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)], allowed_states=['success'], retries=0, timeout=1, poke_interval=1, depends_on_past=False, start_date=DEFAULT_DATE)) dag_minutely.add_task(ExternalTaskSensor( task_id='test_external_task_sensor_multiple_dates_with_failure', external_dag_id=dag_external_id, external_task_id='test_with_failure', execution_date_fn=lambda dt: [dt + timedelta(seconds=i) for i in range(2)], allowed_states=['success'], retries=0, depends_on_past=False, timeout=1, poke_interval=1, start_date=DEFAULT_DATE))
Add ability for ExternalTaskSensor to wait on multiple runs of a task

Currently, using the execution_date_fn parameter of the ExternalTaskSensor only allows waiting for the completion of one given run of the task the sensor is sensing. However, this prevents users from having setups where dags depend on one another without sharing the same schedule frequency.

For example, say you have a dag scheduled hourly that transforms log data and is owned by the team in charge of logging. In the current setup, higher-level teams that want to use this transformed data cannot create dags that process the transformed log data in daily batches while making sure it was properly created. Note that simply waiting for the data to be present (using e.g. the HivePartitionSensor if the data is in Hive) might not be satisfactory, because the data being present does not mean it is ready to be used.

This commit adds the ability to do exactly that, by letting an ExternalTaskSensor wait for multiple runs of the task it is sensing to finish. Higher-level teams can now set up dags with an ExternalTaskSensor sensing the end task of the dag that transforms the log data, waiting for the successful completion of 24 of its hourly runs.
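An editorial aside on the record above: a minimal usage sketch of the feature the message describes, reusing the Airflow 1.x import path from the test file; the dag and task names here are hypothetical, not taken from the commit.

from datetime import timedelta

from airflow.operators.sensors import ExternalTaskSensor

# Have a daily dag wait for all 24 hourly runs of the upstream transform.
wait_for_transformed_logs = ExternalTaskSensor(
    task_id='wait_for_transformed_logs',          # hypothetical task id
    external_dag_id='hourly_log_transform',       # hypothetical upstream dag
    external_task_id='publish_transformed_logs',  # hypothetical upstream task
    execution_date_fn=lambda dt: [dt + timedelta(hours=i) for i in range(24)],
    allowed_states=['success'])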
2a2b11f78ee64cd54cdfb8d105606215c42dcaa5
bot.py
bot.py
import tweepy
from secrets import *

# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)

api = tweepy.API(auth)
Update 0.0.1 - Added imports - Added standards for accessing Twitter API
Update 0.0.1 - Added imports - Added standards for accessing Twitter API
Python
mit
FXelix/space_facts_bot
<REPLACE_OLD>
<REPLACE_NEW>
import tweepy
from secrets import *

# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)

api = tweepy.API(auth)
<REPLACE_END>
<|endoftext|>
import tweepy
from secrets import *

# standard for accessing Twitter API
auth = tweepy.OAuthHandler(C_KEY, C_SECRET)
auth.set_access_token(A_TOKEN, A_TOKEN_SECRET)

api = tweepy.API(auth)
Update 0.0.1 - Added imports - Added standards for accessing Twitter API
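A hedged usage sketch for the record above (an annotation, not part of the commit): with api constructed as shown, posting a tweet is a single call. update_status is tweepy's documented method; the fact text is invented.

fact = "A day on Venus lasts longer than its year."  # hypothetical content
api.update_status(status=fact)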
bcd7f8f3d7313538ab1c04da9c42e774350ccdfe
ui/widgets/histogram/TrackingHistogramWidget.py
ui/widgets/histogram/TrackingHistogramWidget.py
""" TrackingHistogramWidget :Authors: Berend Klein Haneveld """ from PySide.QtGui import * from PySide.QtCore import * from HistogramWidget import HistogramWidget from TrackingNodeItem import TrackingNodeItem class TrackingHistogramWidget(HistogramWidget): """ TrackingHistogramWidget """ updatePosition = Signal(float) def __init__(self): super(TrackingHistogramWidget, self).__init__() self.nodeItem = None def update(self): super(TrackingHistogramWidget, self).update() if not self.nodeItem: return self.nodeItem.update() def setHistogram(self, histogram): super(TrackingHistogramWidget, self).setHistogram(histogram) if not self.nodeItem: self.nodeItem = TrackingNodeItem() self.scene().addItem(self.nodeItem) self.nodeItem.setHistogramItem(self._histogramItem) self.nodeItem.setPos(QPoint(0, 0)) self.nodeItem.setZValue(300) self.nodeItem.delegate = self def updatePos(self, position): self.updatePosition.emit(position)
""" TrackingHistogramWidget :Authors: Berend Klein Haneveld """ from PySide.QtGui import * from PySide.QtCore import * from HistogramWidget import HistogramWidget from TrackingNodeItem import TrackingNodeItem from ui.widgets import Style class TrackingHistogramWidget(HistogramWidget): """ TrackingHistogramWidget """ updatePosition = Signal(float) def __init__(self): super(TrackingHistogramWidget, self).__init__() self.nodeItem = None Style.styleWidgetForTab(self) def update(self): super(TrackingHistogramWidget, self).update() if not self.nodeItem: return self.nodeItem.update() def setHistogram(self, histogram): super(TrackingHistogramWidget, self).setHistogram(histogram) if not self.nodeItem: self.nodeItem = TrackingNodeItem() self.scene().addItem(self.nodeItem) self.nodeItem.setHistogramItem(self._histogramItem) self.nodeItem.setPos(QPoint(0, 0)) self.nodeItem.setZValue(300) self.nodeItem.delegate = self def updatePos(self, position): self.updatePosition.emit(position)
Fix background color on OS X for histogram widget of ray.
Fix background color on OS X for histogram widget of ray.
Python
mit
berendkleinhaneveld/Registrationshop,berendkleinhaneveld/Registrationshop
<REPLACE_OLD>
TrackingNodeItem


class
<REPLACE_NEW>
TrackingNodeItem
from ui.widgets import Style


class
<REPLACE_END>
<REPLACE_OLD>
None

    def
<REPLACE_NEW>
None
        Style.styleWidgetForTab(self)

    def
<REPLACE_END>
<|endoftext|>
"""
TrackingHistogramWidget

:Authors:
    Berend Klein Haneveld
"""

from PySide.QtGui import *
from PySide.QtCore import *
from HistogramWidget import HistogramWidget
from TrackingNodeItem import TrackingNodeItem
from ui.widgets import Style


class TrackingHistogramWidget(HistogramWidget):
    """
    TrackingHistogramWidget
    """

    updatePosition = Signal(float)

    def __init__(self):
        super(TrackingHistogramWidget, self).__init__()

        self.nodeItem = None
        Style.styleWidgetForTab(self)

    def update(self):
        super(TrackingHistogramWidget, self).update()
        if not self.nodeItem:
            return
        self.nodeItem.update()

    def setHistogram(self, histogram):
        super(TrackingHistogramWidget, self).setHistogram(histogram)
        if not self.nodeItem:
            self.nodeItem = TrackingNodeItem()
            self.scene().addItem(self.nodeItem)
        self.nodeItem.setHistogramItem(self._histogramItem)
        self.nodeItem.setPos(QPoint(0, 0))
        self.nodeItem.setZValue(300)
        self.nodeItem.delegate = self

    def updatePos(self, position):
        self.updatePosition.emit(position)
Fix background color on OS X for histogram widget of ray.
"""
TrackingHistogramWidget

:Authors:
    Berend Klein Haneveld
"""

from PySide.QtGui import *
from PySide.QtCore import *
from HistogramWidget import HistogramWidget
from TrackingNodeItem import TrackingNodeItem


class TrackingHistogramWidget(HistogramWidget):
    """
    TrackingHistogramWidget
    """

    updatePosition = Signal(float)

    def __init__(self):
        super(TrackingHistogramWidget, self).__init__()

        self.nodeItem = None

    def update(self):
        super(TrackingHistogramWidget, self).update()
        if not self.nodeItem:
            return
        self.nodeItem.update()

    def setHistogram(self, histogram):
        super(TrackingHistogramWidget, self).setHistogram(histogram)
        if not self.nodeItem:
            self.nodeItem = TrackingNodeItem()
            self.scene().addItem(self.nodeItem)
        self.nodeItem.setHistogramItem(self._histogramItem)
        self.nodeItem.setPos(QPoint(0, 0))
        self.nodeItem.setZValue(300)
        self.nodeItem.delegate = self

    def updatePos(self, position):
        self.updatePosition.emit(position)
b590ddd735131faa3fd1bdc91b1866e1bd7b0738
us_ignite/snippets/management/commands/snippets_load_fixtures.py
us_ignite/snippets/management/commands/snippets_load_fixtures.py
from django.core.management.base import BaseCommand

from us_ignite.snippets.models import Snippet


FIXTURES = [
    {
        'slug': 'home-box',
        'name': 'Up next:',
        'body': '',
        'url_text': 'Get involved',
        'url': '',
    },
]


class Command(BaseCommand):

    def handle(self, *args, **options):
        for data in FIXTURES:
            try:
                # Ignore existing snippets:
                Snippet.objects.get(slug=data['slug'])
                continue
            except Snippet.DoesNotExist:
                pass
            data.update({
                'status': Snippet.PUBLISHED,
            })
            Snippet.objects.create(**data)
            print u'Importing %s' % data['slug']
        print "Done!"
from django.core.management.base import BaseCommand

from us_ignite.snippets.models import Snippet


FIXTURES = [
    {
        'slug': 'home-box',
        'name': 'UP NEXT: LOREM IPSUM',
        'body': '',
        'url_text': 'GET INVOLVED',
        'url': '',
    },
    {
        'slug': 'featured',
        'name': 'FEATURED CONTENT',
        'body': '',
        'url_text': 'FEATURED',
        'url': '',
    },
]


class Command(BaseCommand):

    def handle(self, *args, **options):
        for data in FIXTURES:
            try:
                # Ignore existing snippets:
                Snippet.objects.get(slug=data['slug'])
                continue
            except Snippet.DoesNotExist:
                pass
            data.update({
                'status': Snippet.PUBLISHED,
            })
            Snippet.objects.create(**data)
            print u'Importing %s' % data['slug']
        print "Done!"
Add featured homepage initial fixture.
Add featured homepage initial fixture.
Python
bsd-3-clause
us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite,us-ignite/us_ignite
<REPLACE_OLD>
'Up next:',
<REPLACE_NEW>
'UP NEXT: LOREM IPSUM',
<REPLACE_END>
<REPLACE_OLD>
'Get involved',
<REPLACE_NEW>
'GET INVOLVED',
        'url': '',
    },
    {
        'slug': 'featured',
        'name': 'FEATURED CONTENT',
        'body': '',
        'url_text': 'FEATURED',
<REPLACE_END>
<|endoftext|>
from django.core.management.base import BaseCommand

from us_ignite.snippets.models import Snippet


FIXTURES = [
    {
        'slug': 'home-box',
        'name': 'UP NEXT: LOREM IPSUM',
        'body': '',
        'url_text': 'GET INVOLVED',
        'url': '',
    },
    {
        'slug': 'featured',
        'name': 'FEATURED CONTENT',
        'body': '',
        'url_text': 'FEATURED',
        'url': '',
    },
]


class Command(BaseCommand):

    def handle(self, *args, **options):
        for data in FIXTURES:
            try:
                # Ignore existing snippets:
                Snippet.objects.get(slug=data['slug'])
                continue
            except Snippet.DoesNotExist:
                pass
            data.update({
                'status': Snippet.PUBLISHED,
            })
            Snippet.objects.create(**data)
            print u'Importing %s' % data['slug']
        print "Done!"
Add featured homepage initial fixture.
from django.core.management.base import BaseCommand

from us_ignite.snippets.models import Snippet


FIXTURES = [
    {
        'slug': 'home-box',
        'name': 'Up next:',
        'body': '',
        'url_text': 'Get involved',
        'url': '',
    },
]


class Command(BaseCommand):

    def handle(self, *args, **options):
        for data in FIXTURES:
            try:
                # Ignore existing snippets:
                Snippet.objects.get(slug=data['slug'])
                continue
            except Snippet.DoesNotExist:
                pass
            data.update({
                'status': Snippet.PUBLISHED,
            })
            Snippet.objects.create(**data)
            print u'Importing %s' % data['slug']
        print "Done!"
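An editorial aside on the record above: the handle() loop implements get-or-skip by hand. As a hedged alternative (not part of the commit), Django's built-in get_or_create expresses the same idea with the record's own model, in the codebase's Python 2 style:

for data in FIXTURES:
    data['status'] = Snippet.PUBLISHED
    snippet, created = Snippet.objects.get_or_create(
        slug=data.pop('slug'), defaults=data)
    if created:
        print u'Importing %s' % snippet.slug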
1856e8102b709176e46c3751bd21234dd5d1eacc
src/nodeconductor_assembly_waldur/packages/tests/unittests/test_admin.py
src/nodeconductor_assembly_waldur/packages/tests/unittests/test_admin.py
from django.test import TestCase

from .. import factories
from ... import admin, models


class TestPackageComponentForm(TestCase):

    def test_package_component_form_is_valid_when_component_price_is_0(self):
        data = {
            'monthly_price': '0',
            'amount': '2',
            'type': models.PackageComponent.Types.RAM,
            'price': '9',  # price is required but not used in form validation.
        }
        form = admin.PackageComponentForm(data=data)
        self.assertTrue(form.is_valid())

    def test_package_component_form_is_invalid_if_package_template_has_connected_packages_already(self):
        template = factories.PackageTemplateFactory()
        factories.OpenStackPackageFactory(template=template)
        instance = template.components.first()
        data = {
            'monthly_price': '0',
            'amount': '2',
            'type': instance.type,
            'price': '9',  # price is required but not used in form validation.
        }
        form = admin.PackageComponentForm(data=data, instance=instance)
        self.assertFalse(form.is_valid())
Add unit tests for package component form
Add unit tests for package component form [WAL-450]
Python
mit
opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/waldur-mastermind,opennode/waldur-mastermind,opennode/nodeconductor-assembly-waldur,opennode/nodeconductor-assembly-waldur
<REPLACE_OLD>
<REPLACE_NEW>
from django.test import TestCase

from .. import factories
from ... import admin, models


class TestPackageComponentForm(TestCase):

    def test_package_component_form_is_valid_when_component_price_is_0(self):
        data = {
            'monthly_price': '0',
            'amount': '2',
            'type': models.PackageComponent.Types.RAM,
            'price': '9',  # price is required but not used in form validation.
        }
        form = admin.PackageComponentForm(data=data)
        self.assertTrue(form.is_valid())

    def test_package_component_form_is_invalid_if_package_template_has_connected_packages_already(self):
        template = factories.PackageTemplateFactory()
        factories.OpenStackPackageFactory(template=template)
        instance = template.components.first()
        data = {
            'monthly_price': '0',
            'amount': '2',
            'type': instance.type,
            'price': '9',  # price is required but not used in form validation.
        }
        form = admin.PackageComponentForm(data=data, instance=instance)
        self.assertFalse(form.is_valid())
<REPLACE_END>
<|endoftext|>
from django.test import TestCase

from .. import factories
from ... import admin, models


class TestPackageComponentForm(TestCase):

    def test_package_component_form_is_valid_when_component_price_is_0(self):
        data = {
            'monthly_price': '0',
            'amount': '2',
            'type': models.PackageComponent.Types.RAM,
            'price': '9',  # price is required but not used in form validation.
        }
        form = admin.PackageComponentForm(data=data)
        self.assertTrue(form.is_valid())

    def test_package_component_form_is_invalid_if_package_template_has_connected_packages_already(self):
        template = factories.PackageTemplateFactory()
        factories.OpenStackPackageFactory(template=template)
        instance = template.components.first()
        data = {
            'monthly_price': '0',
            'amount': '2',
            'type': instance.type,
            'price': '9',  # price is required but not used in form validation.
        }
        form = admin.PackageComponentForm(data=data, instance=instance)
        self.assertFalse(form.is_valid())
Add unit tests for package component form [WAL-450]
3e1408affa823af2ed95decf52b002614d060a26
pombola/core/tests/test_templatetags.py
pombola/core/tests/test_templatetags.py
from django.test import TestCase

from ..templatetags.breadcrumbs import breadcrumbs


class BreadcrumbTest(TestCase):

    def test_breadcrumbs(self):
        """Check that the breadcrumbs are generated as expected"""
        home_li = '<li><a href="/" title="Breadcrumb link to the homepage.">Home</a> <span class="sep">&raquo;</span> </li>'

        tests = (
            # input, expected output
            ( '/', '<li>Home</li>'),
            ( '/foo', home_li + '<li>Foo</li>'),
            ( '/foo/bar', home_li + '<li><a href="foo/" title="Breadcrumb link to Foo">Foo</a> <span class="sep">&raquo;</span> </li><li>Bar</li>'),

            # Test that coordinates are passed through correctly
            # (don't drop '-', put space after ',')
            # See issue #762
            ( '/-1.23,4.56', home_li + '<li>-1.23, 4.56</li>'),
        )

        for url, expected in tests:
            actual = breadcrumbs(url)
            self.assertEqual(actual, expected)
from django.test import TestCase

from ..templatetags.breadcrumbs import breadcrumbs
from ..templatetags.active_class import active_class


class BreadcrumbTest(TestCase):

    def test_breadcrumbs(self):
        """Check that the breadcrumbs are generated as expected"""
        home_li = '<li><a href="/" title="Breadcrumb link to the homepage.">Home</a> <span class="sep">&raquo;</span> </li>'

        tests = (
            # input, expected output
            ( '/', '<li>Home</li>'),
            ( '/foo', home_li + '<li>Foo</li>'),
            ( '/foo/bar', home_li + '<li><a href="foo/" title="Breadcrumb link to Foo">Foo</a> <span class="sep">&raquo;</span> </li><li>Bar</li>'),

            # Test that coordinates are passed through correctly
            # (don't drop '-', put space after ',')
            # See issue #762
            ( '/-1.23,4.56', home_li + '<li>-1.23, 4.56</li>'),
        )

        for url, expected in tests:
            actual = breadcrumbs(url)
            self.assertEqual(actual, expected)


class ActiveClassTest(TestCase):

    def test_active(self):
        """Check that active is returned when the url matches the input"""
        tests = (
            ('/', 'home', {}),
            ('/place/foo/', 'place', {'slug': 'foo'}),
        )

        for current_url, route_name, kwargs in tests:
            actual = active_class(current_url, route_name, **kwargs)
            self.assertEqual(' active ', actual)

        self.assertEqual(active_class('/foo', 'home'), '')
Add tests for active_class templatetag
Add tests for active_class templatetag
Python
agpl-3.0
hzj123/56th,geoffkilpin/pombola,hzj123/56th,geoffkilpin/pombola,mysociety/pombola,hzj123/56th,mysociety/pombola,hzj123/56th,geoffkilpin/pombola,hzj123/56th,patricmutwiri/pombola,patricmutwiri/pombola,mysociety/pombola,geoffkilpin/pombola,patricmutwiri/pombola,ken-muturi/pombola,ken-muturi/pombola,mysociety/pombola,ken-muturi/pombola,ken-muturi/pombola,patricmutwiri/pombola,patricmutwiri/pombola,ken-muturi/pombola,patricmutwiri/pombola,geoffkilpin/pombola,hzj123/56th,geoffkilpin/pombola,ken-muturi/pombola,mysociety/pombola,mysociety/pombola
<REPLACE_OLD>
breadcrumbs


class
<REPLACE_NEW>
breadcrumbs
from ..templatetags.active_class import active_class


class
<REPLACE_END>
<REPLACE_OLD>
expected)
<REPLACE_NEW>
expected)


class ActiveClassTest(TestCase):

    def test_active(self):
        """Check that active is returned when the url matches the input"""
        tests = (
            ('/', 'home', {}),
            ('/place/foo/', 'place', {'slug': 'foo'}),
        )

        for current_url, route_name, kwargs in tests:
            actual = active_class(current_url, route_name, **kwargs)
            self.assertEqual(' active ', actual)

        self.assertEqual(active_class('/foo', 'home'), '')
<REPLACE_END>
<|endoftext|>
from django.test import TestCase

from ..templatetags.breadcrumbs import breadcrumbs
from ..templatetags.active_class import active_class


class BreadcrumbTest(TestCase):

    def test_breadcrumbs(self):
        """Check that the breadcrumbs are generated as expected"""
        home_li = '<li><a href="/" title="Breadcrumb link to the homepage.">Home</a> <span class="sep">&raquo;</span> </li>'

        tests = (
            # input, expected output
            ( '/', '<li>Home</li>'),
            ( '/foo', home_li + '<li>Foo</li>'),
            ( '/foo/bar', home_li + '<li><a href="foo/" title="Breadcrumb link to Foo">Foo</a> <span class="sep">&raquo;</span> </li><li>Bar</li>'),

            # Test that coordinates are passed through correctly
            # (don't drop '-', put space after ',')
            # See issue #762
            ( '/-1.23,4.56', home_li + '<li>-1.23, 4.56</li>'),
        )

        for url, expected in tests:
            actual = breadcrumbs(url)
            self.assertEqual(actual, expected)


class ActiveClassTest(TestCase):

    def test_active(self):
        """Check that active is returned when the url matches the input"""
        tests = (
            ('/', 'home', {}),
            ('/place/foo/', 'place', {'slug': 'foo'}),
        )

        for current_url, route_name, kwargs in tests:
            actual = active_class(current_url, route_name, **kwargs)
            self.assertEqual(' active ', actual)

        self.assertEqual(active_class('/foo', 'home'), '')
Add tests for active_class templatetag
from django.test import TestCase

from ..templatetags.breadcrumbs import breadcrumbs


class BreadcrumbTest(TestCase):

    def test_breadcrumbs(self):
        """Check that the breadcrumbs are generated as expected"""
        home_li = '<li><a href="/" title="Breadcrumb link to the homepage.">Home</a> <span class="sep">&raquo;</span> </li>'

        tests = (
            # input, expected output
            ( '/', '<li>Home</li>'),
            ( '/foo', home_li + '<li>Foo</li>'),
            ( '/foo/bar', home_li + '<li><a href="foo/" title="Breadcrumb link to Foo">Foo</a> <span class="sep">&raquo;</span> </li><li>Bar</li>'),

            # Test that coordinates are passed through correctly
            # (don't drop '-', put space after ',')
            # See issue #762
            ( '/-1.23,4.56', home_li + '<li>-1.23, 4.56</li>'),
        )

        for url, expected in tests:
            actual = breadcrumbs(url)
            self.assertEqual(actual, expected)
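An editorial aside on the record above: the tests pin down the contract of active_class without showing its body. Purely as a hedged sketch (the repository's real templatetag may differ), an implementation consistent with those assertions could look like this, using the reverse import path of Django versions before 1.10:

from django.core.urlresolvers import reverse


def active_class(current_url, route_name, **kwargs):
    # Return ' active ' when the current URL is the one the route resolves to.
    if current_url == reverse(route_name, kwargs=kwargs or None):
        return ' active '
    return ''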
822cb468c033c81d8107d865715d024177b38fcd
CodeFights/doodledPassword.py
CodeFights/doodledPassword.py
#!/usr/local/bin/python

# Code Fights Doodled Password Problem
from collections import deque


def doodledPassword(digits):
    n = len(digits)
    res = [deque(digits) for _ in range(n)]
    deque(map(lambda i_x: i_x[1].rotate(-i_x[0]), enumerate(res)), 0)
    return [list(d) for d in res]


def main():
    tests = [
        [
            [1, 2, 3, 4, 5],
            [
                [1, 2, 3, 4, 5],
                [2, 3, 4, 5, 1],
                [3, 4, 5, 1, 2],
                [4, 5, 1, 2, 3],
                [5, 1, 2, 3, 4]
            ]
        ],
        [[5], [[5]]],
        [
            [2, 2, 2, 2],
            [
                [2, 2, 2, 2],
                [2, 2, 2, 2],
                [2, 2, 2, 2],
                [2, 2, 2, 2]
            ]
        ],
        [
            [9, 8, 7, 5, 4],
            [
                [9, 8, 7, 5, 4],
                [8, 7, 5, 4, 9],
                [7, 5, 4, 9, 8],
                [5, 4, 9, 8, 7],
                [4, 9, 8, 7, 5]
            ]
        ],
        [
            [1, 5, 1, 5, 1, 4],
            [
                [1, 5, 1, 5, 1, 4],
                [5, 1, 5, 1, 4, 1],
                [1, 5, 1, 4, 1, 5],
                [5, 1, 4, 1, 5, 1],
                [1, 4, 1, 5, 1, 5],
                [4, 1, 5, 1, 5, 1]
            ]
        ]
    ]

    for t in tests:
        res = doodledPassword(t[0])
        if t[1] == res:
            print("PASSED: doodledPassword({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: doodledPassword({}) returned {}, answer: {}"
                  .format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
Solve Code Fights doodled password problem
Solve Code Fights doodled password problem
Python
mit
HKuz/Test_Code
<REPLACE_OLD>
<REPLACE_NEW>
#!/usr/local/bin/python

# Code Fights Doodled Password Problem
from collections import deque


def doodledPassword(digits):
    n = len(digits)
    res = [deque(digits) for _ in range(n)]
    deque(map(lambda i_x: i_x[1].rotate(-i_x[0]), enumerate(res)), 0)
    return [list(d) for d in res]


def main():
    tests = [
        [
            [1, 2, 3, 4, 5],
            [
                [1, 2, 3, 4, 5],
                [2, 3, 4, 5, 1],
                [3, 4, 5, 1, 2],
                [4, 5, 1, 2, 3],
                [5, 1, 2, 3, 4]
            ]
        ],
        [[5], [[5]]],
        [
            [2, 2, 2, 2],
            [
                [2, 2, 2, 2],
                [2, 2, 2, 2],
                [2, 2, 2, 2],
                [2, 2, 2, 2]
            ]
        ],
        [
            [9, 8, 7, 5, 4],
            [
                [9, 8, 7, 5, 4],
                [8, 7, 5, 4, 9],
                [7, 5, 4, 9, 8],
                [5, 4, 9, 8, 7],
                [4, 9, 8, 7, 5]
            ]
        ],
        [
            [1, 5, 1, 5, 1, 4],
            [
                [1, 5, 1, 5, 1, 4],
                [5, 1, 5, 1, 4, 1],
                [1, 5, 1, 4, 1, 5],
                [5, 1, 4, 1, 5, 1],
                [1, 4, 1, 5, 1, 5],
                [4, 1, 5, 1, 5, 1]
            ]
        ]
    ]

    for t in tests:
        res = doodledPassword(t[0])
        if t[1] == res:
            print("PASSED: doodledPassword({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: doodledPassword({}) returned {}, answer: {}"
                  .format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
<REPLACE_END>
<|endoftext|>
#!/usr/local/bin/python

# Code Fights Doodled Password Problem
from collections import deque


def doodledPassword(digits):
    n = len(digits)
    res = [deque(digits) for _ in range(n)]
    deque(map(lambda i_x: i_x[1].rotate(-i_x[0]), enumerate(res)), 0)
    return [list(d) for d in res]


def main():
    tests = [
        [
            [1, 2, 3, 4, 5],
            [
                [1, 2, 3, 4, 5],
                [2, 3, 4, 5, 1],
                [3, 4, 5, 1, 2],
                [4, 5, 1, 2, 3],
                [5, 1, 2, 3, 4]
            ]
        ],
        [[5], [[5]]],
        [
            [2, 2, 2, 2],
            [
                [2, 2, 2, 2],
                [2, 2, 2, 2],
                [2, 2, 2, 2],
                [2, 2, 2, 2]
            ]
        ],
        [
            [9, 8, 7, 5, 4],
            [
                [9, 8, 7, 5, 4],
                [8, 7, 5, 4, 9],
                [7, 5, 4, 9, 8],
                [5, 4, 9, 8, 7],
                [4, 9, 8, 7, 5]
            ]
        ],
        [
            [1, 5, 1, 5, 1, 4],
            [
                [1, 5, 1, 5, 1, 4],
                [5, 1, 5, 1, 4, 1],
                [1, 5, 1, 4, 1, 5],
                [5, 1, 4, 1, 5, 1],
                [1, 4, 1, 5, 1, 5],
                [4, 1, 5, 1, 5, 1]
            ]
        ]
    ]

    for t in tests:
        res = doodledPassword(t[0])
        if t[1] == res:
            print("PASSED: doodledPassword({}) returned {}"
                  .format(t[0], res))
        else:
            print("FAILED: doodledPassword({}) returned {}, answer: {}"
                  .format(t[0], res, t[1]))


if __name__ == '__main__':
    main()
Solve Code Fights doodled password problem
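A worked illustration of the rotate(-n) behavior the solution above relies on (an editorial aside, not part of the commit): a negative argument rotates the deque to the left, producing successive rows of the expected matrix.

from collections import deque

d = deque([1, 2, 3, 4, 5])
d.rotate(-1)                       # negative argument rotates left
assert list(d) == [2, 3, 4, 5, 1]  # second expected row
d.rotate(-1)
assert list(d) == [3, 4, 5, 1, 2]  # third expected row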
4a92328908f38ec7cc6077289217a802283db03b
examples/list_people.py
examples/list_people.py
#! /usr/bin/python

# See README.txt for information and build instructions.

import addressbook_pb2
import sys


# Iterates through all people in the AddressBook and prints info about them.
def ListPeople(address_book):
    for person in address_book.person:
        print "Person ID:", person.id
        print "  Name:", person.name
        if person.HasField('email'):
            print "  E-mail address:", person.email

        for phone_number in person.phone:
            if phone_number.type == addressbook_pb2.Person.MOBILE:
                print "  Mobile phone #: ",
            elif phone_number.type == addressbook_pb2.Person.HOME:
                print "  Home phone #: ",
            elif phone_number.type == addressbook_pb2.Person.WORK:
                print "  Work phone #: ",
            print phone_number.number


# Main procedure:  Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
    print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
    sys.exit(-1)

address_book = addressbook_pb2.AddressBook()

# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()

ListPeople(address_book)
#! /usr/bin/python

# See README.txt for information and build instructions.

import addressbook_pb2
import sys


# Iterates through all people in the AddressBook and prints info about them.
def ListPeople(address_book):
    for person in address_book.person:
        print "Person ID:", person.id
        print "  Name:", person.name
        if person.HasField('email'):
            print "  E-mail address:", person.email

        for phone_number in person.phone:
            if phone_number.type == addressbook_pb2.Person.MOBILE:
                print "  Mobile phone #:",
            elif phone_number.type == addressbook_pb2.Person.HOME:
                print "  Home phone #:",
            elif phone_number.type == addressbook_pb2.Person.WORK:
                print "  Work phone #:",
            print phone_number.number


# Main procedure:  Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
    print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
    sys.exit(-1)

address_book = addressbook_pb2.AddressBook()

# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()

ListPeople(address_book)
Make Python example output identical to C++ and Java by removing redundant spaces.
Make Python example output identical to C++ and Java by removing redundant spaces.
Python
bsd-3-clause
LeslieW/protobuf,LeslieW/protobuf,LeslieW/protobuf,LeslieW/protobuf
<REPLACE_OLD>
#: ",
<REPLACE_NEW>
#:",
<REPLACE_END>
<REPLACE_OLD>
#: ",
<REPLACE_NEW>
#:",
<REPLACE_END>
<REPLACE_OLD>
#: ",
<REPLACE_NEW>
#:",
<REPLACE_END>
<|endoftext|>
#! /usr/bin/python

# See README.txt for information and build instructions.

import addressbook_pb2
import sys


# Iterates through all people in the AddressBook and prints info about them.
def ListPeople(address_book):
    for person in address_book.person:
        print "Person ID:", person.id
        print "  Name:", person.name
        if person.HasField('email'):
            print "  E-mail address:", person.email

        for phone_number in person.phone:
            if phone_number.type == addressbook_pb2.Person.MOBILE:
                print "  Mobile phone #:",
            elif phone_number.type == addressbook_pb2.Person.HOME:
                print "  Home phone #:",
            elif phone_number.type == addressbook_pb2.Person.WORK:
                print "  Work phone #:",
            print phone_number.number


# Main procedure:  Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
    print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
    sys.exit(-1)

address_book = addressbook_pb2.AddressBook()

# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()

ListPeople(address_book)
Make Python example output identical to C++ and Java by removing redundant spaces.
#! /usr/bin/python

# See README.txt for information and build instructions.

import addressbook_pb2
import sys


# Iterates through all people in the AddressBook and prints info about them.
def ListPeople(address_book):
    for person in address_book.person:
        print "Person ID:", person.id
        print "  Name:", person.name
        if person.HasField('email'):
            print "  E-mail address:", person.email

        for phone_number in person.phone:
            if phone_number.type == addressbook_pb2.Person.MOBILE:
                print "  Mobile phone #: ",
            elif phone_number.type == addressbook_pb2.Person.HOME:
                print "  Home phone #: ",
            elif phone_number.type == addressbook_pb2.Person.WORK:
                print "  Work phone #: ",
            print phone_number.number


# Main procedure:  Reads the entire address book from a file and prints all
# the information inside.
if len(sys.argv) != 2:
    print "Usage:", sys.argv[0], "ADDRESS_BOOK_FILE"
    sys.exit(-1)

address_book = addressbook_pb2.AddressBook()

# Read the existing address book.
f = open(sys.argv[1], "rb")
address_book.ParseFromString(f.read())
f.close()

ListPeople(address_book)
e2959ec01b25c3f447fdd31608b30f19c2dc3599
engine.py
engine.py
# Use x, y coords for unit positions
# (97, 56) ... (104, 56)
#    ...   ...    ...
# (97, 49) ... (104, 49)
#
# Algebraic notation for a position is:
# algebraic_pos = chr(x) + chr(y)


def _coord_to_algebraic(coord):
    x, y = coord
    return chr(x) + chr(y)


def _algebraic_to_coord(algebraic):
    x, y = algebraic[0], algebraic[1]
    return ord(x), ord(y)
# Use x, y coords for unit positions
# (97, 56) ... (104, 56)
#    ...   ...    ...
# (97, 49) ... (104, 49)
#
# Algebraic notation for a position is:
# algebraic_pos = chr(x) + chr(y)


def _coord_to_algebraic(coord):
    x, y = coord
    return chr(x) + chr(y)


def _algebraic_to_coord(algebraic):
    x, y = algebraic[0], algebraic[1]
    return ord(x), ord(y)


def _is_pos_on_board(coord):
    u"""Return True if coordinate is on the board."""
    x, y = coord
    if (97 <= x <= 104) and (49 <= y <= 56):
        return True
    else:
        return False
Add _is_pos_on_board() to determine if a position is on the board
Add _is_pos_on_board() to determine if a position is on the board
Python
mit
EyuelAbebe/gamer,EyuelAbebe/gamer
<REPLACE_OLD>
ord(y)
<REPLACE_NEW>
ord(y)


def _is_pos_on_board(coord):
    u"""Return True if coordinate is on the board."""
    x, y = coord
    if (97 <= x <= 104) and (49 <= y <= 56):
        return True
    else:
        return False
<REPLACE_END>
<|endoftext|>
# Use x, y coords for unit positions
# (97, 56) ... (104, 56)
#    ...   ...    ...
# (97, 49) ... (104, 49)
#
# Algebraic notation for a position is:
# algebraic_pos = chr(x) + chr(y)


def _coord_to_algebraic(coord):
    x, y = coord
    return chr(x) + chr(y)


def _algebraic_to_coord(algebraic):
    x, y = algebraic[0], algebraic[1]
    return ord(x), ord(y)


def _is_pos_on_board(coord):
    u"""Return True if coordinate is on the board."""
    x, y = coord
    if (97 <= x <= 104) and (49 <= y <= 56):
        return True
    else:
        return False
Add _is_pos_on_board() to determine if a position is on the board
# Use x, y coords for unit positions
# (97, 56) ... (104, 56)
#    ...   ...    ...
# (97, 49) ... (104, 49)
#
# Algebraic notation for a position is:
# algebraic_pos = chr(x) + chr(y)


def _coord_to_algebraic(coord):
    x, y = coord
    return chr(x) + chr(y)


def _algebraic_to_coord(algebraic):
    x, y = algebraic[0], algebraic[1]
    return ord(x), ord(y)
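A quick worked check of the coordinate scheme in the record above (an illustration, not part of the commit); it relies only on ord('a') == 97 and ord('1') == 49:

assert _coord_to_algebraic((97, 49)) == 'a1'    # bottom-left corner
assert _algebraic_to_coord('h8') == (104, 56)   # top-right corner
assert _is_pos_on_board((104, 56))
assert not _is_pos_on_board((105, 56))          # one file past 'h'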
1b7509d8bd624bbf33352f622d8c03be6f3e35f2
src/sentry/api/serializers/models/organization_member.py
src/sentry/api/serializers/models/organization_member.py
from __future__ import absolute_import

from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMember


@register(OrganizationMember)
class OrganizationMemberSerializer(Serializer):
    def serialize(self, obj, attrs, user):
        d = {
            'id': str(obj.id),
            'email': obj.email or obj.user.email,
            'access': obj.get_type_display(),
            'pending': obj.is_pending,
            'dateCreated': obj.date_added,
        }
        return d
from __future__ import absolute_import

from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMember
from sentry.utils.avatar import get_gravatar_url


@register(OrganizationMember)
class OrganizationMemberSerializer(Serializer):
    def serialize(self, obj, attrs, user):
        d = {
            'id': str(obj.id),
            'email': obj.email or obj.user.email,
            'access': obj.get_type_display(),
            'pending': obj.is_pending,
            'dateCreated': obj.date_added,
            'avatarUrl': get_gravatar_url(obj.email, size=32),
        }
        return d
Add avatarUrl to team member serializers
Add avatarUrl to team member serializers

Conflicts:
    src/sentry/api/serializers/models/organization_member.py
    src/sentry/api/serializers/models/release.py

cherry-pick 8ee1bee748ae7f51987ea8ec5ee10795b656cfd9
Python
bsd-3-clause
jean/sentry,gencer/sentry,looker/sentry,ngonzalvez/sentry,gg7/sentry,mvaled/sentry,nicholasserra/sentry,wong2/sentry,beeftornado/sentry,JamesMura/sentry,alexm92/sentry,JamesMura/sentry,korealerts1/sentry,wujuguang/sentry,BayanGroup/sentry,imankulov/sentry,fotinakis/sentry,JTCunning/sentry,kevinlondon/sentry,jean/sentry,gencer/sentry,hongliang5623/sentry,TedaLIEz/sentry,looker/sentry,pauloschilling/sentry,llonchj/sentry,llonchj/sentry,hongliang5623/sentry,Natim/sentry,wong2/sentry,BuildingLink/sentry,fuziontech/sentry,daevaorn/sentry,jokey2k/sentry,argonemyth/sentry,zenefits/sentry,nicholasserra/sentry,daevaorn/sentry,mvaled/sentry,nicholasserra/sentry,ifduyue/sentry,ngonzalvez/sentry,vperron/sentry,ifduyue/sentry,JTCunning/sentry,gencer/sentry,beeftornado/sentry,gg7/sentry,ewdurbin/sentry,mvaled/sentry,fotinakis/sentry,JTCunning/sentry,BayanGroup/sentry,vperron/sentry,drcapulet/sentry,felixbuenemann/sentry,zenefits/sentry,camilonova/sentry,korealerts1/sentry,JackDanger/sentry,kevinastone/sentry,fuziontech/sentry,kevinlondon/sentry,BuildingLink/sentry,alexm92/sentry,kevinlondon/sentry,korealerts1/sentry,drcapulet/sentry,wong2/sentry,pauloschilling/sentry,JamesMura/sentry,kevinastone/sentry,JackDanger/sentry,looker/sentry,daevaorn/sentry,Kryz/sentry,jean/sentry,felixbuenemann/sentry,looker/sentry,Natim/sentry,1tush/sentry,TedaLIEz/sentry,beeftornado/sentry,JamesMura/sentry,ifduyue/sentry,JamesMura/sentry,felixbuenemann/sentry,mvaled/sentry,boneyao/sentry,zenefits/sentry,looker/sentry,Kryz/sentry,zenefits/sentry,argonemyth/sentry,jokey2k/sentry,camilonova/sentry,Natim/sentry,daevaorn/sentry,jean/sentry,mvaled/sentry,wujuguang/sentry,BuildingLink/sentry,wujuguang/sentry,imankulov/sentry,ewdurbin/sentry,fotinakis/sentry,argonemyth/sentry,hongliang5623/sentry,ifduyue/sentry,llonchj/sentry,drcapulet/sentry,1tush/sentry,ewdurbin/sentry,songyi199111/sentry,gg7/sentry,fotinakis/sentry,songyi199111/sentry,Kryz/sentry,boneyao/sentry,jean/sentry,1tush/sentry,jokey2k/sentry,vperron/sentry,fuziontech/sentry,zenefits/sentry,TedaLIEz/sentry,gencer/sentry,kevinastone/sentry,songyi199111/sentry,boneyao/sentry,JackDanger/sentry,mitsuhiko/sentry,BuildingLink/sentry,alexm92/sentry,ngonzalvez/sentry,mvaled/sentry,pauloschilling/sentry,mitsuhiko/sentry,BayanGroup/sentry,imankulov/sentry,ifduyue/sentry,camilonova/sentry,BuildingLink/sentry,gencer/sentry
<REPLACE_OLD>
OrganizationMember


@register(OrganizationMember)
class
<REPLACE_NEW>
OrganizationMember
from sentry.utils.avatar import get_gravatar_url


@register(OrganizationMember)
class
<REPLACE_END>
<INSERT>
            'avatarUrl': get_gravatar_url(obj.email, size=32),
<INSERT_END>
<|endoftext|>
from __future__ import absolute_import

from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMember
from sentry.utils.avatar import get_gravatar_url


@register(OrganizationMember)
class OrganizationMemberSerializer(Serializer):
    def serialize(self, obj, attrs, user):
        d = {
            'id': str(obj.id),
            'email': obj.email or obj.user.email,
            'access': obj.get_type_display(),
            'pending': obj.is_pending,
            'dateCreated': obj.date_added,
            'avatarUrl': get_gravatar_url(obj.email, size=32),
        }
        return d
Add avatarUrl to team member serializers

Conflicts:
    src/sentry/api/serializers/models/organization_member.py
    src/sentry/api/serializers/models/release.py

cherry-pick 8ee1bee748ae7f51987ea8ec5ee10795b656cfd9
from __future__ import absolute_import

from sentry.api.serializers import Serializer, register
from sentry.models import OrganizationMember


@register(OrganizationMember)
class OrganizationMemberSerializer(Serializer):
    def serialize(self, obj, attrs, user):
        d = {
            'id': str(obj.id),
            'email': obj.email or obj.user.email,
            'access': obj.get_type_display(),
            'pending': obj.is_pending,
            'dateCreated': obj.date_added,
        }
        return d
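An editorial aside: get_gravatar_url is Sentry's own helper, so the call above is project-specific. As a hedged sketch of what such a helper typically does, following Gravatar's documented URL scheme rather than Sentry's actual code (Python 2 to match the codebase's era):

import hashlib
from urllib import urlencode


def gravatar_url(email, size=32):
    # Gravatar hashes the trimmed, lower-cased email with MD5.
    digest = hashlib.md5(email.strip().lower()).hexdigest()
    return 'https://secure.gravatar.com/avatar/%s?%s' % (
        digest, urlencode({'s': str(size)}))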
5547f8a11192e9182b6d9aceef99249fc7b9d2cb
froide/publicbody/migrations/0007_auto_20171224_0744.py
froide/publicbody/migrations/0007_auto_20171224_0744.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-24 06:44
from __future__ import unicode_literals

from django.db import migrations


def create_classifications(apps, schema_editor):
    from ..models import Classification
    # Use treebeard API
    # Classification = apps.get_model('publicbody', 'Classification')
    PublicBody = apps.get_model('publicbody', 'PublicBody')
    classifications = {}
    for pb in PublicBody.objects.exclude(classification_slug=''):
        if pb.classification_slug in classifications:
            pb.classification = classifications[pb.classification_slug]
        else:
            root = Classification.add_root(
                name=pb.classification_name,
                slug=pb.classification_slug
            )
            pb.classification = root
            classifications[pb.classification_slug] = root
        pb.save()


class Migration(migrations.Migration):

    dependencies = [
        ('publicbody', '0006_auto_20171224_0732'),
    ]

    operations = [
        migrations.RunPython(create_classifications),
    ]
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-24 06:44
from __future__ import unicode_literals

from django.db import migrations


def create_classifications(apps, schema_editor):
    from ..models import Classification as RealClassification
    # Use treebeard API
    Classification = apps.get_model('publicbody', 'Classification')
    PublicBody = apps.get_model('publicbody', 'PublicBody')
    classifications = {}
    for pb in PublicBody.objects.exclude(classification_slug=''):
        if pb.classification_slug in classifications:
            pb.classification = classifications[pb.classification_slug]
        else:
            root = Classification(
                name=pb.classification_name,
                slug=pb.classification_slug,
                depth=1,
                path=RealClassification._get_path(None, 1, len(classifications))
            )
            root.save()
            pb.classification = root
            classifications[pb.classification_slug] = root
        pb.save()
    RealClassification.fix_tree()


class Migration(migrations.Migration):

    dependencies = [
        ('publicbody', '0006_auto_20171224_0732'),
    ]

    operations = [
        migrations.RunPython(create_classifications),
    ]
Fix pb migration, by faking treebeard
Fix pb migration, by faking treebeard
Python
mit
fin/froide,fin/froide,fin/froide,stefanw/froide,stefanw/froide,stefanw/froide,stefanw/froide,fin/froide,stefanw/froide
<INSERT>
 as RealClassification
<INSERT_END>
<REPLACE_OLD>
API
    #
<REPLACE_NEW>
API
<REPLACE_END>
<REPLACE_OLD>
Classification.add_root(
<REPLACE_NEW>
Classification(
<REPLACE_END>
<REPLACE_OLD>
slug=pb.classification_slug
<REPLACE_NEW>
slug=pb.classification_slug,
                depth=1,
                path=RealClassification._get_path(None, 1, len(classifications))
<REPLACE_END>
<INSERT>
            root.save()
<INSERT_END>
<REPLACE_OLD>
pb.save()


class
<REPLACE_NEW>
pb.save()
    RealClassification.fix_tree()


class
<REPLACE_END>
<|endoftext|>
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-24 06:44
from __future__ import unicode_literals

from django.db import migrations


def create_classifications(apps, schema_editor):
    from ..models import Classification as RealClassification
    # Use treebeard API
    Classification = apps.get_model('publicbody', 'Classification')
    PublicBody = apps.get_model('publicbody', 'PublicBody')
    classifications = {}
    for pb in PublicBody.objects.exclude(classification_slug=''):
        if pb.classification_slug in classifications:
            pb.classification = classifications[pb.classification_slug]
        else:
            root = Classification(
                name=pb.classification_name,
                slug=pb.classification_slug,
                depth=1,
                path=RealClassification._get_path(None, 1, len(classifications))
            )
            root.save()
            pb.classification = root
            classifications[pb.classification_slug] = root
        pb.save()
    RealClassification.fix_tree()


class Migration(migrations.Migration):

    dependencies = [
        ('publicbody', '0006_auto_20171224_0732'),
    ]

    operations = [
        migrations.RunPython(create_classifications),
    ]
Fix pb migration, by faking treebeard
# -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-24 06:44
from __future__ import unicode_literals

from django.db import migrations


def create_classifications(apps, schema_editor):
    from ..models import Classification
    # Use treebeard API
    # Classification = apps.get_model('publicbody', 'Classification')
    PublicBody = apps.get_model('publicbody', 'PublicBody')
    classifications = {}
    for pb in PublicBody.objects.exclude(classification_slug=''):
        if pb.classification_slug in classifications:
            pb.classification = classifications[pb.classification_slug]
        else:
            root = Classification.add_root(
                name=pb.classification_name,
                slug=pb.classification_slug
            )
            pb.classification = root
            classifications[pb.classification_slug] = root
        pb.save()


class Migration(migrations.Migration):

    dependencies = [
        ('publicbody', '0006_auto_20171224_0732'),
    ]

    operations = [
        migrations.RunPython(create_classifications),
    ]
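A hedged note on the "faking treebeard" fix above, paraphrasing django-treebeard's materialized-path design rather than this project's code: the historical model returned by apps.get_model lacks treebeard's add_root(), so the migration assigns depth and path by hand and calls fix_tree() to repair any inconsistencies. Illustrative root paths under treebeard's default 4-character steps (assumed values, not computed by treebeard here):

# depth == 1 for roots; each sibling takes the next 4-character step
first_root_path = '0001'
second_root_path = '0002'
first_child_path = '00010001'  # a child's path = parent path + its own step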
4a125d2455e1c31043c66835c60cc0e55f9990e9
core/network.py
core/network.py
import codecs
from string import Template
import os

import networkx as nx
from networkx.readwrite import json_graph

path = os.path.dirname(os.path.abspath(__file__))


def create_network(data):

    G = nx.DiGraph()

    for node in data:
        G.add_node( encode_utf8( node['creator'] ) )

        if '___comments' in node:
            for comment in node['___comments']:
                G.add_edge( encode_utf8( comment['from']['name'] ), encode_utf8( node['creator'] ) )

    d = json_graph.node_link_data(G)

    html_template = Template( codecs.open( path + '/html/network.html', 'r').read() )

    js_template_type = 'svg' if len(d['nodes']) < 500 else 'canvas'
    js_text_template = Template( codecs.open( path + '/js/network_' + js_template_type +'.js', 'r').read() )

    css_text = codecs.open( path + '/css/network.css', 'r').read()

    js_text = js_text_template.substitute({'nodes' : d['nodes'], 'links' : d['links']})

    return html_template.substitute( {'css': css_text, 'js': js_text} )


def encode_utf8( string ):
    try:
        return string.encode('utf8')
    except UnicodeDecodeError:
        return string
import codecs
from string import Template
import os

import networkx as nx
from networkx.readwrite import json_graph

path = os.path.dirname(os.path.abspath(__file__))


def create_network(data):

    G = nx.DiGraph()

    for node in data:
        G.add_node( encode_utf8( node['creator'] ) )

        if '_comments' in node:
            for comment in node['_comments']:
                G.add_edge( encode_utf8( comment['from']['name'] ), encode_utf8( node['creator'] ) )

    d = json_graph.node_link_data(G)

    html_template = Template( codecs.open( path + '/html/network.html', 'r').read() )

    js_template_type = 'svg' if len(d['nodes']) < 500 else 'canvas'
    js_text_template = Template( codecs.open( path + '/js/network_' + js_template_type +'.js', 'r').read() )

    css_text = codecs.open( path + '/css/network.css', 'r').read()

    js_text = js_text_template.substitute({'nodes' : d['nodes'], 'links' : d['links']})

    return html_template.substitute( {'css': css_text, 'js': js_text} )


def encode_utf8( string ):
    try:
        return string.encode('utf8')
    except UnicodeDecodeError:
        return string
Fix variable naming for comments
Fix variable naming for comments
Python
mit
HIIT/hybra-core,HIIT/hybra-core,HIIT/hybra-core,HIIT/hybra-core,HIIT/hybra-core
<REPLACE_OLD>
'___comments'
<REPLACE_NEW>
'_comments'
<REPLACE_END>
<REPLACE_OLD>
node['___comments']:
<REPLACE_NEW>
node['_comments']:
<REPLACE_END>
<|endoftext|>
import codecs
from string import Template
import os

import networkx as nx
from networkx.readwrite import json_graph

path = os.path.dirname(os.path.abspath(__file__))


def create_network(data):

    G = nx.DiGraph()

    for node in data:
        G.add_node( encode_utf8( node['creator'] ) )

        if '_comments' in node:
            for comment in node['_comments']:
                G.add_edge( encode_utf8( comment['from']['name'] ), encode_utf8( node['creator'] ) )

    d = json_graph.node_link_data(G)

    html_template = Template( codecs.open( path + '/html/network.html', 'r').read() )

    js_template_type = 'svg' if len(d['nodes']) < 500 else 'canvas'
    js_text_template = Template( codecs.open( path + '/js/network_' + js_template_type +'.js', 'r').read() )

    css_text = codecs.open( path + '/css/network.css', 'r').read()

    js_text = js_text_template.substitute({'nodes' : d['nodes'], 'links' : d['links']})

    return html_template.substitute( {'css': css_text, 'js': js_text} )


def encode_utf8( string ):
    try:
        return string.encode('utf8')
    except UnicodeDecodeError:
        return string
Fix variable naming for comments
import codecs
from string import Template
import os

import networkx as nx
from networkx.readwrite import json_graph

path = os.path.dirname(os.path.abspath(__file__))


def create_network(data):

    G = nx.DiGraph()

    for node in data:
        G.add_node( encode_utf8( node['creator'] ) )

        if '___comments' in node:
            for comment in node['___comments']:
                G.add_edge( encode_utf8( comment['from']['name'] ), encode_utf8( node['creator'] ) )

    d = json_graph.node_link_data(G)

    html_template = Template( codecs.open( path + '/html/network.html', 'r').read() )

    js_template_type = 'svg' if len(d['nodes']) < 500 else 'canvas'
    js_text_template = Template( codecs.open( path + '/js/network_' + js_template_type +'.js', 'r').read() )

    css_text = codecs.open( path + '/css/network.css', 'r').read()

    js_text = js_text_template.substitute({'nodes' : d['nodes'], 'links' : d['links']})

    return html_template.substitute( {'css': css_text, 'js': js_text} )


def encode_utf8( string ):
    try:
        return string.encode('utf8')
    except UnicodeDecodeError:
        return string
ee28fdc66fbb0f91821ff18ff219791bf5de8f4d
corehq/apps/fixtures/tasks.py
corehq/apps/fixtures/tasks.py
from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.fixtures.upload import upload_fixture_file
from soil import DownloadBase
from celery.task import task


@task(serializer='pickle')
def fixture_upload_async(domain, download_id, replace):
    task = fixture_upload_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(download_id)
    result = upload_fixture_file(domain, download_ref.get_filename(), replace, task)
    DownloadBase.set_progress(task, 100, 100)
    return {
        'messages': {
            'success': result.success,
            'messages': result.messages,
            'errors': result.errors,
            'number_of_fixtures': result.number_of_fixtures,
        },
    }


@task(serializer='pickle')
def fixture_download_async(prepare_download, *args, **kw):
    task = fixture_download_async
    DownloadBase.set_progress(task, 0, 100)
    prepare_download(task=task, *args, **kw)
    DownloadBase.set_progress(task, 100, 100)
from __future__ import absolute_import, unicode_literals

from celery.task import task

from soil import DownloadBase

from corehq.apps.fixtures.upload import upload_fixture_file


@task
def fixture_upload_async(domain, download_id, replace):
    task = fixture_upload_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(download_id)
    result = upload_fixture_file(domain, download_ref.get_filename(), replace, task)
    DownloadBase.set_progress(task, 100, 100)
    return {
        'messages': {
            'success': result.success,
            'messages': result.messages,
            'errors': result.errors,
            'number_of_fixtures': result.number_of_fixtures,
        },
    }


@task(serializer='pickle')
def fixture_download_async(prepare_download, *args, **kw):
    task = fixture_download_async
    DownloadBase.set_progress(task, 0, 100)
    prepare_download(task=task, *args, **kw)
    DownloadBase.set_progress(task, 100, 100)
Change fixture upload task to json serializer
Change fixture upload task to json serializer
Python
bsd-3-clause
dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq,dimagi/commcare-hq
<REPLACE_OLD>
absolute_import
from __future__
<REPLACE_NEW>
absolute_import, unicode_literals

from celery.task
<REPLACE_END>
<REPLACE_OLD>
unicode_literals
from
<REPLACE_NEW>
task

from soil import DownloadBase

from
<REPLACE_END>
<REPLACE_OLD>
upload_fixture_file
from soil import DownloadBase
from celery.task import task


@task(serializer='pickle')
def
<REPLACE_NEW>
upload_fixture_file


@task
def
<REPLACE_END>
<|endoftext|>
from __future__ import absolute_import, unicode_literals

from celery.task import task

from soil import DownloadBase

from corehq.apps.fixtures.upload import upload_fixture_file


@task
def fixture_upload_async(domain, download_id, replace):
    task = fixture_upload_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(download_id)
    result = upload_fixture_file(domain, download_ref.get_filename(), replace, task)
    DownloadBase.set_progress(task, 100, 100)
    return {
        'messages': {
            'success': result.success,
            'messages': result.messages,
            'errors': result.errors,
            'number_of_fixtures': result.number_of_fixtures,
        },
    }


@task(serializer='pickle')
def fixture_download_async(prepare_download, *args, **kw):
    task = fixture_download_async
    DownloadBase.set_progress(task, 0, 100)
    prepare_download(task=task, *args, **kw)
    DownloadBase.set_progress(task, 100, 100)
Change fixture upload task to json serializer
from __future__ import absolute_import
from __future__ import unicode_literals
from corehq.apps.fixtures.upload import upload_fixture_file
from soil import DownloadBase
from celery.task import task


@task(serializer='pickle')
def fixture_upload_async(domain, download_id, replace):
    task = fixture_upload_async
    DownloadBase.set_progress(task, 0, 100)
    download_ref = DownloadBase.get(download_id)
    result = upload_fixture_file(domain, download_ref.get_filename(), replace, task)
    DownloadBase.set_progress(task, 100, 100)
    return {
        'messages': {
            'success': result.success,
            'messages': result.messages,
            'errors': result.errors,
            'number_of_fixtures': result.number_of_fixtures,
        },
    }


@task(serializer='pickle')
def fixture_download_async(prepare_download, *args, **kw):
    task = fixture_download_async
    DownloadBase.set_progress(task, 0, 100)
    prepare_download(task=task, *args, **kw)
    DownloadBase.set_progress(task, 100, 100)
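A hedged note on the record above, describing standard Celery behavior rather than anything project-specific: a bare @task falls back to the app-wide default serializer, so dropping serializer='pickle' switches this task to JSON only because the configured default is JSON. A sketch of the two spellings, assuming such a default:

from celery.task import task


@task(serializer='pickle')   # pin pickle for this task only
def legacy_task(payload):
    return payload


@task                        # use the configured default serializer
def default_task(payload):
    return payload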
859bc842da554e7e85b3684ade024ee533142d0b
modules/__init__.py
modules/__init__.py
import botconfig
from settings import wolfgame as var

# Todo: Allow game modes to be set via config

# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
    if not setting.isupper():
        continue  # Not a setting
    if not setting in var.__dict__.keys():
        continue  # Don't carry over config-only settings

    # If we got that far, it's valid
    setattr(var, setting, value)
Add ability to carry settings from botconfig to var.
Add ability to carry settings from botconfig to var.
Python
bsd-2-clause
Cr0wb4r/lykos,billion57/lykos,Diitto/lykos,Agent-Isai/lykos
<INSERT>
import botconfig
from settings import wolfgame as var

# Todo: Allow game modes to be set via config

# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
<INSERT_END>
<INSERT>
    if not setting.isupper():
        continue  # Not a setting
    if not setting in var.__dict__.keys():
        continue  # Don't carry over config-only settings

    # If we got that far, it's valid
    setattr(var, setting, value)
<INSERT_END>
<|endoftext|>
import botconfig
from settings import wolfgame as var

# Todo: Allow game modes to be set via config

# Carry over settings from botconfig into settings/wolfgame.py
for setting, value in botconfig.__dict__.items():
    if not setting.isupper():
        continue  # Not a setting
    if not setting in var.__dict__.keys():
        continue  # Don't carry over config-only settings

    # If we got that far, it's valid
    setattr(var, setting, value)
Add ability to carry settings from botconfig to var.
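A tiny worked illustration of the carry-over loop in the record above; the class names and settings are hypothetical stand-ins for the botconfig and wolfgame modules:

class Cfg(object):
    NICK = 'mybot'      # upper-case and known to Var, so it is carried over
    PASS = 'hunter2'    # upper-case but unknown to Var, so it is skipped
    debug = True        # lower-case, so it is not treated as a setting


class Var(object):
    NICK = 'wolfbot'


for setting, value in Cfg.__dict__.items():
    if not setting.isupper():
        continue
    if setting not in Var.__dict__:
        continue
    setattr(Var, setting, value)

assert Var.NICK == 'mybot'
assert not hasattr(Var, 'PASS')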
92c01be43b80247ce2233851dd74b041bb9d44b0
csunplugged/resources/views/BarcodeChecksumPosterResourceGenerator.py
csunplugged/resources/views/BarcodeChecksumPosterResourceGenerator.py
"""Class for Barcode Checksum Poster resource generator.""" from PIL import Image from utils.BaseResourceGenerator import BaseResourceGenerator class BarcodeChecksumPosterResourceGenerator(BaseResourceGenerator): """Class for Grid resource generator.""" additional_valid_options = { "barcode_length": ["12", "13"] } def data(self): """Create data for a copy of the Grid resource. Returns: A dictionary of the one page for the resource. """ image_path = "static/img/resources/barcode-checksum-poster/{}-digits.png" image_path = image_path.format(self.requested_options["barcode_length"]) image = Image.open(image_path) return {"type": "image", "data": image} @property def subtitle(self): """Return the subtitle string of the resource. Used after the resource name in the filename, and also on the resource image. Returns: text for subtitle (str). """ barcode_length = self.requested_options["barcode_length"] return "{} digits - {}".format(barcode_length, super().subtitle)
"""Class for Barcode Checksum Poster resource generator.""" from PIL import Image, ImageDraw from utils.BaseResourceGenerator import BaseResourceGenerator from utils.TextBoxDrawer import TextBoxDrawer from django.utils.translation import ugettext as _ class BarcodeChecksumPosterResourceGenerator(BaseResourceGenerator): """Class for Grid resource generator.""" additional_valid_options = { "barcode_length": ["12", "13"] } def data(self): """Create data for a copy of the Grid resource. Returns: A dictionary of the one page for the resource. """ path = "static/img/resources/barcode-checksum-poster/{}-digits" path = path.format(self.requested_options["barcode_length"]) image_path = "{}.png".format(path) svg_path = "{}.svg".format(path) image = Image.open(image_path) draw = ImageDraw.Draw(image) textbox_drawer = TextBoxDrawer(image, draw, svg_path) textbox_drawer.write_text_box( "title", _("13 Digit Barcode"), horiz_just="center", vert_just="center", ) headings = { "heading1": _("Separate!"), "heading2": _("Operate!"), "heading3": _("Calculate!") } for heading_id, heading in headings.items(): textbox_drawer.write_text_box( heading_id, heading, ) textbox_drawer.write_text_box( "paragraph", _("Remember that this algorithm uses modulo 10, so we are only " "interested in the number in the one's column."), ) return {"type": "image", "data": image} @property def subtitle(self): """Return the subtitle string of the resource. Used after the resource name in the filename, and also on the resource image. Returns: text for subtitle (str). """ barcode_length = self.requested_options["barcode_length"] return "{} digits - {}".format(barcode_length, super().subtitle)
Modify Barcode Checksum Poster resource to dynamically overlay text
Modify Barcode Checksum Poster resource to dynamically overlay text
Python
mit
uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged,uccser/cs-unplugged
<REPLACE_OLD>
Image

from
<REPLACE_NEW>
Image, ImageDraw

from
<REPLACE_END>
<REPLACE_OLD>
BaseResourceGenerator


class
<REPLACE_NEW>
BaseResourceGenerator
from utils.TextBoxDrawer import TextBoxDrawer
from django.utils.translation import ugettext as _


class
<REPLACE_END>
<INSERT>
        path = "static/img/resources/barcode-checksum-poster/{}-digits"
        path = path.format(self.requested_options["barcode_length"])
<INSERT_END>
<REPLACE_OLD>
"static/img/resources/barcode-checksum-poster/{}-digits.png"
        image_path = image_path.format(self.requested_options["barcode_length"])
<REPLACE_NEW>
"{}.png".format(path)
        svg_path = "{}.svg".format(path)
<REPLACE_END>
<REPLACE_OLD>
Image.open(image_path)
<REPLACE_NEW>
Image.open(image_path)

        draw = ImageDraw.Draw(image)
        textbox_drawer = TextBoxDrawer(image, draw, svg_path)

        textbox_drawer.write_text_box(
            "title",
            _("13 Digit Barcode"),
            horiz_just="center",
            vert_just="center",
        )

        headings = {
            "heading1": _("Separate!"),
            "heading2": _("Operate!"),
            "heading3": _("Calculate!")
        }

        for heading_id, heading in headings.items():
            textbox_drawer.write_text_box(
                heading_id,
                heading,
            )

        textbox_drawer.write_text_box(
            "paragraph",
            _("Remember that this algorithm uses modulo 10, so we are only "
              "interested in the number in the one's column."),
        )
<REPLACE_END>
<|endoftext|>
"""Class for Barcode Checksum Poster resource generator."""

from PIL import Image, ImageDraw

from utils.BaseResourceGenerator import BaseResourceGenerator
from utils.TextBoxDrawer import TextBoxDrawer
from django.utils.translation import ugettext as _


class BarcodeChecksumPosterResourceGenerator(BaseResourceGenerator):
    """Class for Grid resource generator."""

    additional_valid_options = {
        "barcode_length": ["12", "13"]
    }

    def data(self):
        """Create data for a copy of the Grid resource.

        Returns:
            A dictionary of the one page for the resource.
        """
        path = "static/img/resources/barcode-checksum-poster/{}-digits"
        path = path.format(self.requested_options["barcode_length"])
        image_path = "{}.png".format(path)
        svg_path = "{}.svg".format(path)
        image = Image.open(image_path)

        draw = ImageDraw.Draw(image)
        textbox_drawer = TextBoxDrawer(image, draw, svg_path)

        textbox_drawer.write_text_box(
            "title",
            _("13 Digit Barcode"),
            horiz_just="center",
            vert_just="center",
        )

        headings = {
            "heading1": _("Separate!"),
            "heading2": _("Operate!"),
            "heading3": _("Calculate!")
        }

        for heading_id, heading in headings.items():
            textbox_drawer.write_text_box(
                heading_id,
                heading,
            )

        textbox_drawer.write_text_box(
            "paragraph",
            _("Remember that this algorithm uses modulo 10, so we are only "
              "interested in the number in the one's column."),
        )

        return {"type": "image", "data": image}

    @property
    def subtitle(self):
        """Return the subtitle string of the resource.

        Used after the resource name in the filename, and
        also on the resource image.

        Returns:
            text for subtitle (str).
        """
        barcode_length = self.requested_options["barcode_length"]
        return "{} digits - {}".format(barcode_length, super().subtitle)
Modify Barcode Checksum Poster resource to dynamically overlay text
"""Class for Barcode Checksum Poster resource generator."""

from PIL import Image

from utils.BaseResourceGenerator import BaseResourceGenerator


class BarcodeChecksumPosterResourceGenerator(BaseResourceGenerator):
    """Class for Grid resource generator."""

    additional_valid_options = {
        "barcode_length": ["12", "13"]
    }

    def data(self):
        """Create data for a copy of the Grid resource.

        Returns:
            A dictionary of the one page for the resource.
        """
        image_path = "static/img/resources/barcode-checksum-poster/{}-digits.png"
        image_path = image_path.format(self.requested_options["barcode_length"])
        image = Image.open(image_path)
        return {"type": "image", "data": image}

    @property
    def subtitle(self):
        """Return the subtitle string of the resource.

        Used after the resource name in the filename, and
        also on the resource image.

        Returns:
            text for subtitle (str).
        """
        barcode_length = self.requested_options["barcode_length"]
        return "{} digits - {}".format(barcode_length, super().subtitle)
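An editorial aside: TextBoxDrawer is this project's own helper, so the call signatures above are specific to the repository. Purely as a hedged sketch of the underlying PIL mechanics such a helper builds on (textsize was available in Pillow versions contemporary with this commit):

from PIL import Image, ImageDraw, ImageFont

image = Image.new("RGB", (600, 200), "white")
draw = ImageDraw.Draw(image)
font = ImageFont.load_default()
text = "13 Digit Barcode"
# Centre the text inside the 600x200 canvas.
width, height = draw.textsize(text, font=font)
draw.text(((600 - width) // 2, (200 - height) // 2), text,
          fill="black", font=font)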
9ea8b1f1f4ccc068b460e76127f288742d25088e
django/contrib/comments/feeds.py
django/contrib/comments/feeds.py
from django.conf import settings from django.contrib.syndication.feeds import Feed from django.contrib.sites.models import Site from django.contrib import comments class LatestCommentFeed(Feed): """Feed of latest comments on the current site.""" def title(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return u"%s comments" % self._site.name def link(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return "http://%s/" % (self._site.domain) def description(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return u"Latest comments on %s" % self._site.name def items(self): qs = comments.get_model().objects.filter( site__pk = settings.SITE_ID, is_public = True, is_removed = False, ) if getattr(settings, 'COMMENTS_BANNED_USERS_GROUP', None): where = ['user_id NOT IN (SELECT user_id FROM auth_users_group WHERE group_id = %s)'] params = [settings.COMMENTS_BANNED_USERS_GROUP] qs = qs.extra(where=where, params=params) return qs.order_by('-submit_date')[:40] def item_pubdate(self, item): return item.submit_date
from django.conf import settings from django.contrib.syndication.feeds import Feed from django.contrib.sites.models import Site from django.contrib import comments class LatestCommentFeed(Feed): """Feed of latest comments on the current site.""" def title(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return u"%s comments" % self._site.name def link(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return "http://%s/" % (self._site.domain) def description(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return u"Latest comments on %s" % self._site.name def items(self): qs = comments.get_model().objects.filter( site__pk = settings.SITE_ID, is_public = True, is_removed = False, ) if getattr(settings, 'COMMENTS_BANNED_USERS_GROUP', None): where = ['user_id NOT IN (SELECT user_id FROM auth_user_groups WHERE group_id = %s)'] params = [settings.COMMENTS_BANNED_USERS_GROUP] qs = qs.extra(where=where, params=params) return qs.order_by('-submit_date')[:40] def item_pubdate(self, item): return item.submit_date
Use correct m2m join table name in LatestCommentsFeed
Use correct m2m join table name in LatestCommentsFeed git-svn-id: 4f9f921b081c523744c7bf24d959a0db39629563@9089 bcc190cf-cafb-0310-a4f2-bffc1f526a37
Python
bsd-3-clause
sam-tsai/django-old,skevy/django,dcramer/django-compositepks,alex/django-old,Instagram/django,dcramer/django-compositepks,django-nonrel/django-nonrel,alex/django-old,Smarsh/django,Smarsh/django,dcramer/django-compositepks,bfirsh/django-old,bfirsh/django-old,disqus/django-old,t11e/django,mitsuhiko/django,Instagram/django,alex/django-old,Instagram/django,django-nonrel/django-nonrel,sam-tsai/django-old,Smarsh/django,t11e/django,t11e/django,bfirsh/django-old,jamespacileo/django-france,sam-tsai/django-old,mitsuhiko/django,disqus/django-old,skevy/django,jamespacileo/django-france,sam-tsai/django-old,django-nonrel/django-nonrel,Smarsh/django,mitsuhiko/django,skevy/django,jamespacileo/django-france,disqus/django-old
<REPLACE_OLD> auth_users_group <REPLACE_NEW> auth_user_groups <REPLACE_END> <|endoftext|> from django.conf import settings from django.contrib.syndication.feeds import Feed from django.contrib.sites.models import Site from django.contrib import comments class LatestCommentFeed(Feed): """Feed of latest comments on the current site.""" def title(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return u"%s comments" % self._site.name def link(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return "http://%s/" % (self._site.domain) def description(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return u"Latest comments on %s" % self._site.name def items(self): qs = comments.get_model().objects.filter( site__pk = settings.SITE_ID, is_public = True, is_removed = False, ) if getattr(settings, 'COMMENTS_BANNED_USERS_GROUP', None): where = ['user_id NOT IN (SELECT user_id FROM auth_user_groups WHERE group_id = %s)'] params = [settings.COMMENTS_BANNED_USERS_GROUP] qs = qs.extra(where=where, params=params) return qs.order_by('-submit_date')[:40] def item_pubdate(self, item): return item.submit_date
Use correct m2m join table name in LatestCommentsFeed git-svn-id: 4f9f921b081c523744c7bf24d959a0db39629563@9089 bcc190cf-cafb-0310-a4f2-bffc1f526a37 from django.conf import settings from django.contrib.syndication.feeds import Feed from django.contrib.sites.models import Site from django.contrib import comments class LatestCommentFeed(Feed): """Feed of latest comments on the current site.""" def title(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return u"%s comments" % self._site.name def link(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return "http://%s/" % (self._site.domain) def description(self): if not hasattr(self, '_site'): self._site = Site.objects.get_current() return u"Latest comments on %s" % self._site.name def items(self): qs = comments.get_model().objects.filter( site__pk = settings.SITE_ID, is_public = True, is_removed = False, ) if getattr(settings, 'COMMENTS_BANNED_USERS_GROUP', None): where = ['user_id NOT IN (SELECT user_id FROM auth_users_group WHERE group_id = %s)'] params = [settings.COMMENTS_BANNED_USERS_GROUP] qs = qs.extra(where=where, params=params) return qs.order_by('-submit_date')[:40] def item_pubdate(self, item): return item.submit_date
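Annotation: the fix in this record is a one-word correction to a hand-written SQL subquery. As a runnable illustration of the same parameterised NOT IN pattern, here is a sqlite3 sketch; the table layout below is invented for the demo and only mimics the shape of Django's auth_user_groups join table.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.executescript("""
    CREATE TABLE comment (user_id INTEGER);
    CREATE TABLE auth_user_groups (user_id INTEGER, group_id INTEGER);
    INSERT INTO comment VALUES (1), (2);
    INSERT INTO auth_user_groups VALUES (2, 99);  -- user 2 is in the banned group
""")
rows = conn.execute(
    "SELECT user_id FROM comment WHERE user_id NOT IN "
    "(SELECT user_id FROM auth_user_groups WHERE group_id = ?)", (99,)
).fetchall()
print(rows)  # [(1,)] -- only the non-banned commenter remains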
384beaa77e2eaad642ec7f764acd09c2c3e04350
res_company.py
res_company.py
from openerp.osv import osv, fields from openerp.tools.translate import _ class res_company(osv.Model): _inherit = "res.company" _columns = { 'remittance_letter_top': fields.text( _('Remittance Letter - top message'), help=_('Message to write at the top of Remittance Letter ' 'reports. Available variables: "$iban" for the IBAN; "$date" for ' 'the payment date. HTML tags are allowed.') ), 'remittance_letter_bottom': fields.text( _('Remittance Letter - bottom message'), help=_('Message to write at the bottom of Remittance Letter ' 'reports. HTML tags are allowed.') ), }
from openerp.osv import osv, fields from openerp.tools.translate import _ class res_company(osv.Model): _inherit = "res.company" _columns = { 'remittance_letter_top': fields.text( _('Remittance Letter - top message'), help=_('Message to write at the top of Remittance Letter ' 'reports. Available variables: "$iban" for the IBAN; "$date" for ' 'the payment date. HTML tags are allowed.'), translate=True), 'remittance_letter_bottom': fields.text( _('Remittance Letter - bottom message'), help=_('Message to write at the bottom of Remittance Letter ' 'reports. HTML tags are allowed.'), translate=True), }
Make Remittance Letter config messages translatable
Make Remittance Letter config messages translatable
Python
agpl-3.0
xcgd/account_streamline
<REPLACE_OLD> allowed.') <REPLACE_NEW> allowed.'), <REPLACE_END> <REPLACE_OLD> ), <REPLACE_NEW> translate=True), <REPLACE_END> <REPLACE_OLD> allowed.') <REPLACE_NEW> allowed.'), <REPLACE_END> <REPLACE_OLD> ), <REPLACE_NEW> translate=True), <REPLACE_END> <|endoftext|> from openerp.osv import osv, fields from openerp.tools.translate import _ class res_company(osv.Model): _inherit = "res.company" _columns = { 'remittance_letter_top': fields.text( _('Remittance Letter - top message'), help=_('Message to write at the top of Remittance Letter ' 'reports. Available variables: "$iban" for the IBAN; "$date" for ' 'the payment date. HTML tags are allowed.'), translate=True), 'remittance_letter_bottom': fields.text( _('Remittance Letter - bottom message'), help=_('Message to write at the bottom of Remittance Letter ' 'reports. HTML tags are allowed.'), translate=True), }
Make Remittance Letter config messages translatable from openerp.osv import osv, fields from openerp.tools.translate import _ class res_company(osv.Model): _inherit = "res.company" _columns = { 'remittance_letter_top': fields.text( _('Remittance Letter - top message'), help=_('Message to write at the top of Remittance Letter ' 'reports. Available variables: "$iban" for the IBAN; "$date" for ' 'the payment date. HTML tags are allowed.') ), 'remittance_letter_bottom': fields.text( _('Remittance Letter - bottom message'), help=_('Message to write at the bottom of Remittance Letter ' 'reports. HTML tags are allowed.') ), }
3ef4e68ae64a46f09103001f391b3d6a3d098e33
test/test_bezier_direct.py
test/test_bezier_direct.py
from __future__ import division import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) # import cocos from cocos.director import director from cocos.actions import Bezier from cocos.sprite import Sprite import pyglet from cocos import path def direct_bezier(p0, p1, p2, p3): '''Given four points, returns a bezier path that go through them. It starts in p0, finish in p3, and pass through p1 and p2 in t=0.4 and t=0.6 respectively. ''' def _one_dim(p0xy, B1xy, B2xy, p3xy): '''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3. p0: P sub 0 of bezier, it's also B(0) B1: B(0.4) B2: B(0.6) p3: P sub 3 of bezier, it's also B(1) ''' p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36 p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288 return p1xy, p2xy bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0]) bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1]) bp1 = bp1x, bp1y bp2 = bp2x, bp2y bezier_path = path.Bezier(p0, p3, bp1, bp2) return bezier_path class TestLayer(cocos.layer.Layer): def __init__(self): super( TestLayer, self ).__init__() go_through = [(100,300), (370,330), (430,270), (750,550)] # visually spot through where it should go for pos in go_through: sprite = Sprite('fire.png') sprite.position = pos sprite.scale = .3 self.add(sprite) # calculate the points bezier_path = direct_bezier(*go_through) sprite = Sprite('fire.png') sprite.scale = .3 sprite.color = (0, 0, 255) self.add(sprite) sprite.do(Bezier(bezier_path, 5)) if __name__ == "__main__": director.init(width=800, height=600) test_layer = TestLayer () main_scene = cocos.scene.Scene (test_layer) director.run (main_scene)
Test using bezier going through 4 specific points
Test using bezier going through 4 specific points git-svn-id: 5665c17dde288ce6190d85f4a2d6486351776710@869 f663ce52-ac46-0410-b8de-c1c220b0eb76
Python
bsd-3-clause
eevee/cocos2d-mirror
<REPLACE_OLD> <REPLACE_NEW> from __future__ import division import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) # import cocos from cocos.director import director from cocos.actions import Bezier from cocos.sprite import Sprite import pyglet from cocos import path def direct_bezier(p0, p1, p2, p3): '''Given four points, returns a bezier path that go through them. It starts in p0, finish in p3, and pass through p1 and p2 in t=0.4 and t=0.6 respectively. ''' def _one_dim(p0xy, B1xy, B2xy, p3xy): '''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3. p0: P sub 0 of bezier, it's also B(0) B1: B(0.4) B2: B(0.6) p3: P sub 3 of bezier, it's also B(1) ''' p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36 p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288 return p1xy, p2xy bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0]) bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1]) bp1 = bp1x, bp1y bp2 = bp2x, bp2y bezier_path = path.Bezier(p0, p3, bp1, bp2) return bezier_path class TestLayer(cocos.layer.Layer): def __init__(self): super( TestLayer, self ).__init__() go_through = [(100,300), (370,330), (430,270), (750,550)] # visually spot through where it should go for pos in go_through: sprite = Sprite('fire.png') sprite.position = pos sprite.scale = .3 self.add(sprite) # calculate the points bezier_path = direct_bezier(*go_through) sprite = Sprite('fire.png') sprite.scale = .3 sprite.color = (0, 0, 255) self.add(sprite) sprite.do(Bezier(bezier_path, 5)) if __name__ == "__main__": director.init(width=800, height=600) test_layer = TestLayer () main_scene = cocos.scene.Scene (test_layer) director.run (main_scene) <REPLACE_END> <|endoftext|> from __future__ import division import sys import os sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..')) # import cocos from cocos.director import director from cocos.actions import Bezier from cocos.sprite import Sprite import pyglet from cocos import path def direct_bezier(p0, p1, p2, p3): '''Given four points, returns a bezier path that go through them. It starts in p0, finish in p3, and pass through p1 and p2 in t=0.4 and t=0.6 respectively. ''' def _one_dim(p0xy, B1xy, B2xy, p3xy): '''Calculates the p1 and p2 to draw through B1 and B2 given p0 and p3. p0: P sub 0 of bezier, it's also B(0) B1: B(0.4) B2: B(0.6) p3: P sub 3 of bezier, it's also B(1) ''' p2xy = (1.5 * B2xy - B1xy + 0.12 * p0xy - 0.26 * p3xy) / 0.36 p1xy = (B2xy - 0.064 * p0xy - 0.432 * p2xy - 0.216 * p3xy) / 0.288 return p1xy, p2xy bp1x, bp2x = _one_dim(p0[0], p1[0], p2[0], p3[0]) bp1y, bp2y = _one_dim(p0[1], p1[1], p2[1], p3[1]) bp1 = bp1x, bp1y bp2 = bp2x, bp2y bezier_path = path.Bezier(p0, p3, bp1, bp2) return bezier_path class TestLayer(cocos.layer.Layer): def __init__(self): super( TestLayer, self ).__init__() go_through = [(100,300), (370,330), (430,270), (750,550)] # visually spot through where it should go for pos in go_through: sprite = Sprite('fire.png') sprite.position = pos sprite.scale = .3 self.add(sprite) # calculate the points bezier_path = direct_bezier(*go_through) sprite = Sprite('fire.png') sprite.scale = .3 sprite.color = (0, 0, 255) self.add(sprite) sprite.do(Bezier(bezier_path, 5)) if __name__ == "__main__": director.init(width=800, height=600) test_layer = TestLayer () main_scene = cocos.scene.Scene (test_layer) director.run (main_scene)
Test using bezier going through 4 specific points git-svn-id: 5665c17dde288ce6190d85f4a2d6486351776710@869 f663ce52-ac46-0410-b8de-c1c220b0eb76
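Annotation: the _one_dim algebra in this test can be sanity-checked numerically: evaluate a cubic Bezier at t = 0.4 and t = 0.6 for known control points, then recover those control points from the two samples with the same coefficients. A small self-contained check, scalar and one coordinate only (the numbers are borrowed from the test but used here as control points for the round trip):

def bezier(t, p0, p1, p2, p3):
    s = 1.0 - t
    return s**3 * p0 + 3 * s**2 * t * p1 + 3 * s * t**2 * p2 + t**3 * p3

p0, p1, p2, p3 = 100.0, 370.0, 430.0, 750.0   # arbitrary control points
b1, b2 = bezier(0.4, p0, p1, p2, p3), bezier(0.6, p0, p1, p2, p3)
# same algebra as _one_dim, solving the two samples for the control points
r2 = (1.5 * b2 - b1 + 0.12 * p0 - 0.26 * p3) / 0.36
r1 = (b2 - 0.064 * p0 - 0.432 * r2 - 0.216 * p3) / 0.288
assert abs(r1 - p1) < 1e-9 and abs(r2 - p2) < 1e-9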
6423bb87a392bf6f8abd3b04a0a1bab3181542a0
run_time/src/gae_server/font_mapper.py
run_time/src/gae_server/font_mapper.py
""" Copyright 2014 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from os import path tachyfont_major_version = 1 tachyfont_minor_version = 0 BASE_DIR = path.dirname(__file__) def fontname_to_zipfile(fontname): family_dir = '' if fontname[0:10] == 'NotoSansJP': family_dir = 'NotoSansJP/' zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar' return zip_path
""" Copyright 2014 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from os import path tachyfont_major_version = 1 tachyfont_minor_version = 0 BASE_DIR = path.dirname(__file__) def fontname_to_zipfile(fontname): family_dir = '' if fontname[0:10] == 'NotoSansJP': family_dir = 'NotoSansJP/' elif fontname[0:8] == 'NotoSans': family_dir = 'NotoSans/' elif fontname[0:5] == 'Arimo': family_dir = 'Arimo/' zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar' return zip_path
Add support for NotoSans and Arimo.
Add support for NotoSans and Arimo.
Python
apache-2.0
googlefonts/TachyFont,googlei18n/TachyFont,moyogo/tachyfont,googlei18n/TachyFont,moyogo/tachyfont,bstell/TachyFont,bstell/TachyFont,bstell/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,googlei18n/TachyFont,googlei18n/TachyFont,googlei18n/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,bstell/TachyFont,googlefonts/TachyFont,moyogo/tachyfont,bstell/TachyFont,googlefonts/TachyFont
<INSERT> elif fontname[0:8] == 'NotoSans': family_dir = 'NotoSans/' elif fontname[0:5] == 'Arimo': family_dir = 'Arimo/' <INSERT_END> <|endoftext|> """ Copyright 2014 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from os import path tachyfont_major_version = 1 tachyfont_minor_version = 0 BASE_DIR = path.dirname(__file__) def fontname_to_zipfile(fontname): family_dir = '' if fontname[0:10] == 'NotoSansJP': family_dir = 'NotoSansJP/' elif fontname[0:8] == 'NotoSans': family_dir = 'NotoSans/' elif fontname[0:5] == 'Arimo': family_dir = 'Arimo/' zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar' return zip_path
Add support for NotoSans and Arimo. """ Copyright 2014 Google Inc. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from os import path tachyfont_major_version = 1 tachyfont_minor_version = 0 BASE_DIR = path.dirname(__file__) def fontname_to_zipfile(fontname): family_dir = '' if fontname[0:10] == 'NotoSansJP': family_dir = 'NotoSansJP/' zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar' return zip_path
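Annotation: each new family in this change costs another elif on a sliced prefix. A hedged alternative with the same behaviour, driven by a list instead (the three names are exactly the prefixes from the change; order matters so the longer 'NotoSansJP' is tested before 'NotoSans'):

FAMILIES = ['NotoSansJP', 'NotoSans', 'Arimo']  # longest prefix first

def family_dir_for(fontname):
    for family in FAMILIES:
        if fontname.startswith(family):
            return family + '/'
    return ''

assert family_dir_for('NotoSansJP-Regular') == 'NotoSansJP/'
assert family_dir_for('NotoSans-Bold') == 'NotoSans/'
assert family_dir_for('Arimo-Italic') == 'Arimo/'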
3698f0a51056f32c7595c1baca578d25764cc768
cnddh/config_prd.py
cnddh/config_prd.py
AMBIENTE = u'linux' PROD = False DEBUG = False LOG = False LOGPATH = './Application.log' EMAIL_HOST = 'smtp.gmail.com' EMAIL_LOGIN = 'xxxxxxxx@cnddh.org.br' EMAIL_PASSWORD = 'senha' EMAIL_PORT = 587 DATABASE_URI = 'mysql://root:senha@localhost/cnddh_db' ECHO = False UPLOADS_DEFAULT_DEST = r'C:\Temp\upload' TIMEZONE = 'America/Sao_Paulo' LOCALE = 'pt_BR'
AMBIENTE = u'linux' PROD = False DEBUG = False LOG = False LOGPATH = './Application.log' EMAIL_HOST = 'smtp.gmail.com' EMAIL_LOGIN = 'xxxxxxxx@cnddh.org.br' EMAIL_PASSWORD = 'senha' EMAIL_PORT = 587 DATABASE_URI = 'mysql://root:senha@localhost/cnddh_db' ECHO = False SQLALCHEMY_TRACK_MODIFICATIONS = False UPLOADS_DEFAULT_DEST = r'C:\Temp\upload' TIMEZONE = 'America/Sao_Paulo' LOCALE = 'pt_BR'
Add a key to the config
Add a key to the config
Python
apache-2.0
dedeco/cnddh-denuncias,dedeco/cnddh-denuncias,dedeco/cnddh-denuncias
<INSERT> False SQLALCHEMY_TRACK_MODIFICATIONS = <INSERT_END> <|endoftext|> AMBIENTE = u'linux' PROD = False DEBUG = False LOG = False LOGPATH = './Application.log' EMAIL_HOST = 'smtp.gmail.com' EMAIL_LOGIN = 'xxxxxxxx@cnddh.org.br' EMAIL_PASSWORD = 'senha' EMAIL_PORT = 587 DATABASE_URI = 'mysql://root:senha@localhost/cnddh_db' ECHO = False SQLALCHEMY_TRACK_MODIFICATIONS = False UPLOADS_DEFAULT_DEST = r'C:\Temp\upload' TIMEZONE = 'America/Sao_Paulo' LOCALE = 'pt_BR'
Add a key to the config
AMBIENTE = u'linux'
PROD = False
DEBUG = False
LOG = False
LOGPATH = './Application.log'
EMAIL_HOST = 'smtp.gmail.com'
EMAIL_LOGIN = 'xxxxxxxx@cnddh.org.br'
EMAIL_PASSWORD = 'senha'
EMAIL_PORT = 587
DATABASE_URI = 'mysql://root:senha@localhost/cnddh_db'
ECHO = False
UPLOADS_DEFAULT_DEST = r'C:\Temp\upload'
TIMEZONE = 'America/Sao_Paulo'
LOCALE = 'pt_BR'
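Annotation: SQLALCHEMY_TRACK_MODIFICATIONS = False is the usual way to disable Flask-SQLAlchemy's per-object event bookkeeping and silence its deprecation warning. Assuming this module is importable as cnddh.config_prd (implied by the file path, not confirmed), a sketch of loading it into a Flask app:

from flask import Flask

app = Flask(__name__)
app.config.from_object('cnddh.config_prd')  # pulls in all UPPERCASE names
assert app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] is False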
903c0d6a3bda96a0b193cc6efd2f8e868d4d82e2
setuptools/tests/test_build_ext.py
setuptools/tests/test_build_ext.py
"""build_ext tests """ import unittest from distutils.command.build_ext import build_ext as distutils_build_ext from setuptools.command.build_ext import build_ext from setuptools.dist import Distribution class TestBuildExtTest(unittest.TestCase): def test_get_ext_filename(self): # setuptools needs to give back the same # result than distutils, even if the fullname # is not in ext_map dist = Distribution() cmd = build_ext(dist) cmd.ext_map['foo/bar'] = '' res = cmd.get_ext_filename('foo') wanted = distutils_build_ext.get_ext_filename(cmd, 'foo') assert res == wanted
"""build_ext tests """ import unittest import distutils.command.build_ext as orig from setuptools.command.build_ext import build_ext from setuptools.dist import Distribution class TestBuildExtTest(unittest.TestCase): def test_get_ext_filename(self): # setuptools needs to give back the same # result than distutils, even if the fullname # is not in ext_map dist = Distribution() cmd = build_ext(dist) cmd.ext_map['foo/bar'] = '' res = cmd.get_ext_filename('foo') wanted = orig.build_ext.get_ext_filename(cmd, 'foo') assert res == wanted
Use namespacing for easier reading
Use namespacing for easier reading
Python
mit
pypa/setuptools,pypa/setuptools,pypa/setuptools
<REPLACE_OLD> unittest from <REPLACE_NEW> unittest import <REPLACE_END> <DELETE> import build_ext <DELETE_END> <REPLACE_OLD> distutils_build_ext from <REPLACE_NEW> orig from <REPLACE_END> <REPLACE_OLD> distutils_build_ext.get_ext_filename(cmd, <REPLACE_NEW> orig.build_ext.get_ext_filename(cmd, <REPLACE_END> <|endoftext|> """build_ext tests """ import unittest import distutils.command.build_ext as orig from setuptools.command.build_ext import build_ext from setuptools.dist import Distribution class TestBuildExtTest(unittest.TestCase): def test_get_ext_filename(self): # setuptools needs to give back the same # result than distutils, even if the fullname # is not in ext_map dist = Distribution() cmd = build_ext(dist) cmd.ext_map['foo/bar'] = '' res = cmd.get_ext_filename('foo') wanted = orig.build_ext.get_ext_filename(cmd, 'foo') assert res == wanted
Use namespacing for easier reading """build_ext tests """ import unittest from distutils.command.build_ext import build_ext as distutils_build_ext from setuptools.command.build_ext import build_ext from setuptools.dist import Distribution class TestBuildExtTest(unittest.TestCase): def test_get_ext_filename(self): # setuptools needs to give back the same # result than distutils, even if the fullname # is not in ext_map dist = Distribution() cmd = build_ext(dist) cmd.ext_map['foo/bar'] = '' res = cmd.get_ext_filename('foo') wanted = distutils_build_ext.get_ext_filename(cmd, 'foo') assert res == wanted
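Annotation: both import spellings in this record bind the same class object; the namespaced alias just keeps the module of origin visible at each call site. A quick check, runnable on Pythons that still ship distutils (removed in 3.12):

import distutils.command.build_ext as orig
from distutils.command.build_ext import build_ext as distutils_build_ext

assert orig.build_ext is distutils_build_ext  # one object, two spellings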
764e0b742351c07dda5657fb2dc46f45dce4a3ef
migrations/versions/86b41c3dbd00_add_indexes_on_driver_for_licence_and_.py
migrations/versions/86b41c3dbd00_add_indexes_on_driver_for_licence_and_.py
"""Add indexes on driver for licence and departement Revision ID: 86b41c3dbd00 Revises: ccd5b0142a76 Create Date: 2019-10-21 15:55:35.965422 """ # revision identifiers, used by Alembic. revision = '86b41c3dbd00' down_revision = 'ccd5b0142a76' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): op.create_index('driver_departement_id_idx', 'driver', ['departement_id'], unique=False) op.create_index('driver_professional_licence_idx', 'driver', ['professional_licence'], unique=False) def downgrade(): op.drop_index('driver_professional_licence_idx', table_name='driver') op.drop_index('driver_departement_id_idx', table_name='driver')
Add migration to create index on driver for departement and licence
Add migration to create index on driver for departement and licence
Python
agpl-3.0
openmaraude/APITaxi,openmaraude/APITaxi
<INSERT> """Add indexes on driver for licence and departement Revision ID: 86b41c3dbd00 Revises: ccd5b0142a76 Create Date: 2019-10-21 15:55:35.965422 """ # revision identifiers, used by Alembic. revision = '86b41c3dbd00' down_revision = 'ccd5b0142a76' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): <INSERT_END> <INSERT> op.create_index('driver_departement_id_idx', 'driver', ['departement_id'], unique=False) op.create_index('driver_professional_licence_idx', 'driver', ['professional_licence'], unique=False) def downgrade(): op.drop_index('driver_professional_licence_idx', table_name='driver') op.drop_index('driver_departement_id_idx', table_name='driver') <INSERT_END> <|endoftext|> """Add indexes on driver for licence and departement Revision ID: 86b41c3dbd00 Revises: ccd5b0142a76 Create Date: 2019-10-21 15:55:35.965422 """ # revision identifiers, used by Alembic. revision = '86b41c3dbd00' down_revision = 'ccd5b0142a76' from alembic import op import sqlalchemy as sa from sqlalchemy.dialects import postgresql def upgrade(): op.create_index('driver_departement_id_idx', 'driver', ['departement_id'], unique=False) op.create_index('driver_professional_licence_idx', 'driver', ['professional_licence'], unique=False) def downgrade(): op.drop_index('driver_professional_licence_idx', table_name='driver') op.drop_index('driver_departement_id_idx', table_name='driver')
Add migration to create index on driver for departement and licence
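Annotation: on a large, busy Postgres table the same two indexes are often built without blocking writes. A hedged variant of the upgrade() above using Alembic's autocommit_block and the postgresql_concurrently flag; both need reasonably recent Alembic/SQLAlchemy and this is a sketch, not tested against the APITaxi project.

from alembic import op

def upgrade():
    # CREATE INDEX CONCURRENTLY cannot run inside a transaction block
    with op.get_context().autocommit_block():
        op.create_index('driver_departement_id_idx', 'driver',
                        ['departement_id'], unique=False,
                        postgresql_concurrently=True)
        op.create_index('driver_professional_licence_idx', 'driver',
                        ['professional_licence'], unique=False,
                        postgresql_concurrently=True)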
cce2869ac56fe3576e519884fd2a68d75a7fe1cb
backend/scripts/countdups.py
backend/scripts/countdups.py
#!/usr/bin/env python import rethinkdb as r conn = r.connect('localhost', 30815, db='materialscommons') rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size') total_bytes = 0 total_files = 0 for doc in rql.run(conn): total_bytes = total_bytes + doc['size'] total_files = total_files + 1 print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
Add in script to count duplicates.
Add in script to count duplicates.
Python
mit
materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org,materials-commons/materialscommons.org
<INSERT> #!/usr/bin/env python import rethinkdb as r conn = r.connect('localhost', 30815, db='materialscommons') rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size') total_bytes = 0 total_files = 0 for doc in rql.run(conn): <INSERT_END> <INSERT> total_bytes = total_bytes + doc['size'] total_files = total_files + 1 print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files) <INSERT_END> <|endoftext|> #!/usr/bin/env python import rethinkdb as r conn = r.connect('localhost', 30815, db='materialscommons') rql = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]")).pluck('size') total_bytes = 0 total_files = 0 for doc in rql.run(conn): total_bytes = total_bytes + doc['size'] total_files = total_files + 1 print "Total bytes = %s for %d dups" %(format(total_bytes, ",d"), total_files)
Add in script to count duplicates.
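Annotation: the loop in this script streams every matching document to the client just to add up sizes. RethinkDB can do the aggregation server-side; a sketch against the same table and filter, using the same old-style driver API as the script:

import rethinkdb as r

conn = r.connect('localhost', 30815, db='materialscommons')
dups = r.table('datafiles').filter(r.row['usesid'].match("^[0-9a-f]"))
total_bytes = dups.sum('size').run(conn)   # summed on the server
total_files = dups.count().run(conn)
print("Total bytes = %s for %d dups" % (format(int(total_bytes), ",d"), total_files))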
507a52905164d2814b0b43a6d61eb002dfe0662a
enerdata/datetime/work_and_holidays.py
enerdata/datetime/work_and_holidays.py
import calendar from datetime import timedelta def get_num_of_workdays_holidays(init_date, end_date, holidays_list): workdays = 0 holidays = 0 _date = end_date while _date <= init_date: if (calendar.weekday(_date.year, _date.month, _date.day) in (5, 6) ) or (_date.date() in holidays_list): holidays += 1 else: workdays += 1 _date += timedelta(days=1) return workdays, holidays
Add workdays and holidays getter
Add workdays and holidays getter
Python
mit
gisce/enerdata
<INSERT> import calendar from datetime import timedelta def get_num_of_workdays_holidays(init_date, end_date, holidays_list): <INSERT_END> <INSERT> workdays = 0 holidays = 0 _date = end_date while _date <= init_date: if (calendar.weekday(_date.year, _date.month, _date.day) in (5, 6) ) or (_date.date() in holidays_list): holidays += 1 else: workdays += 1 _date += timedelta(days=1) return workdays, holidays <INSERT_END> <|endoftext|> import calendar from datetime import timedelta def get_num_of_workdays_holidays(init_date, end_date, holidays_list): workdays = 0 holidays = 0 _date = end_date while _date <= init_date: if (calendar.weekday(_date.year, _date.month, _date.day) in (5, 6) ) or (_date.date() in holidays_list): holidays += 1 else: workdays += 1 _date += timedelta(days=1) return workdays, holidays
Add workdays and holidays getter
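Annotation: note that the committed loop initialises _date to end_date and runs while _date <= init_date, so it counts days only when the arguments arrive in reverse order. A hedged sketch of the more conventional orientation, using date objects instead of datetimes, with a quick check (2024-01-01 is a Monday):

import calendar
from datetime import date, timedelta

def count_workdays_holidays(start, end, holidays_list):
    workdays = holidays = 0
    d = start
    while d <= end:
        if calendar.weekday(d.year, d.month, d.day) in (5, 6) or d in holidays_list:
            holidays += 1   # weekend (5 = Saturday, 6 = Sunday) or listed holiday
        else:
            workdays += 1
        d += timedelta(days=1)
    return workdays, holidays

# first ISO week of 2024: five workdays, two weekend days
assert count_workdays_holidays(date(2024, 1, 1), date(2024, 1, 7), []) == (5, 2)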
6e6aa02907b3d156174cfe1a5f8e9c274c080778
SegNetCMR/helpers.py
SegNetCMR/helpers.py
import tensorflow as tf def add_output_images(images, logits, labels): cast_labels = tf.cast(labels, tf.uint8) * 128 cast_labels = cast_labels[...,None] tf.summary.image('input_labels', cast_labels, max_outputs=3) classification1 = tf.nn.softmax(logits = logits, dim=-1)[...,1] output_image_gb = images[...,0] output_image_r = classification1 + tf.multiply(images[...,0], (1-classification1)) output_image = tf.stack([output_image_r, output_image_gb, output_image_gb], axis=3) tf.summary.image('output_mixed', output_image, max_outputs=3) output_image_binary = tf.argmax(logits, 3) output_image_binary = tf.cast(output_image_binary[...,None], tf.float32) * 128/255 tf.summary.image('output_labels', output_image_binary, max_outputs=3) return
import tensorflow as tf def add_output_images(images, logits, labels): cast_labels = tf.cast(labels, tf.uint8) * 128 cast_labels = cast_labels[...,None] tf.summary.image('input_labels', cast_labels, max_outputs=3) classification1 = tf.nn.softmax(logits = logits, dim=-1)[...,1] output_image_gb = images[...,0] output_image_r = classification1 + tf.multiply(images[...,0], (1-classification1)) output_image = tf.stack([output_image_r, output_image_gb, output_image_gb], axis=3) tf.summary.image('output_mixed', output_image, max_outputs=3) output_image_binary = tf.argmax(logits, 3) output_image_binary = tf.cast(output_image_binary[...,None], tf.float32) * 128/255 tf.summary.image('output_labels', output_image_binary, max_outputs=3) output_labels_mixed_r = output_image_binary[...,0] + tf.multiply(images[...,0], (1-output_image_binary[...,0])) output_labels_mixed = tf.stack([output_labels_mixed_r, output_image_gb, output_image_gb], axis=3) tf.summary.image('output_labels_mixed', output_labels_mixed, max_outputs=3) return
Add output with images mixed with binary version of output labels
Add output with images mixed with binary version of output labels
Python
mit
mshunshin/SegNetCMR,mshunshin/SegNetCMR
<INSERT> output_labels_mixed_r = output_image_binary[...,0] + tf.multiply(images[...,0], (1-output_image_binary[...,0])) output_labels_mixed = tf.stack([output_labels_mixed_r, output_image_gb, output_image_gb], axis=3) tf.summary.image('output_labels_mixed', output_labels_mixed, max_outputs=3) <INSERT_END> <|endoftext|> import tensorflow as tf def add_output_images(images, logits, labels): cast_labels = tf.cast(labels, tf.uint8) * 128 cast_labels = cast_labels[...,None] tf.summary.image('input_labels', cast_labels, max_outputs=3) classification1 = tf.nn.softmax(logits = logits, dim=-1)[...,1] output_image_gb = images[...,0] output_image_r = classification1 + tf.multiply(images[...,0], (1-classification1)) output_image = tf.stack([output_image_r, output_image_gb, output_image_gb], axis=3) tf.summary.image('output_mixed', output_image, max_outputs=3) output_image_binary = tf.argmax(logits, 3) output_image_binary = tf.cast(output_image_binary[...,None], tf.float32) * 128/255 tf.summary.image('output_labels', output_image_binary, max_outputs=3) output_labels_mixed_r = output_image_binary[...,0] + tf.multiply(images[...,0], (1-output_image_binary[...,0])) output_labels_mixed = tf.stack([output_labels_mixed_r, output_image_gb, output_image_gb], axis=3) tf.summary.image('output_labels_mixed', output_labels_mixed, max_outputs=3) return
Add output with images mixed with binary version of output labels import tensorflow as tf def add_output_images(images, logits, labels): cast_labels = tf.cast(labels, tf.uint8) * 128 cast_labels = cast_labels[...,None] tf.summary.image('input_labels', cast_labels, max_outputs=3) classification1 = tf.nn.softmax(logits = logits, dim=-1)[...,1] output_image_gb = images[...,0] output_image_r = classification1 + tf.multiply(images[...,0], (1-classification1)) output_image = tf.stack([output_image_r, output_image_gb, output_image_gb], axis=3) tf.summary.image('output_mixed', output_image, max_outputs=3) output_image_binary = tf.argmax(logits, 3) output_image_binary = tf.cast(output_image_binary[...,None], tf.float32) * 128/255 tf.summary.image('output_labels', output_image_binary, max_outputs=3) return
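Annotation: the summaries in this record tint predicted pixels by alpha-blending the mask into the red channel, r = mask + image * (1 - mask). A dependency-light numeric check of that blend, with NumPy standing in for the tensors (values in [0, 1]):

import numpy as np

image = np.array([0.2, 0.8])   # grayscale input pixels
mask = np.array([1.0, 0.0])    # 1.0 where the class was predicted
red = mask + image * (1 - mask)
assert np.allclose(red, [1.0, 0.8])  # a predicted pixel saturates the red channel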
29eb3661ace0f3dd62d210621ebd24ef95261162
src/listen.py
src/listen.py
import redis import re from common import get_db from datetime import datetime MSGPATTERN = re.compile('^(\w+)\|(\d)\|([\s\S]*)$') CHANNEL = 'logfire' def listen(args): global MSGPATTERN rserver = redis.Redis('localhost') pubsub = rserver.pubsub() pubsub.subscribe(CHANNEL) db = get_db(args.mongohost) for packet in pubsub.listen(): try: if packet['type'] != 'message': continue match = MSGPATTERN.match(packet['data']) component = match.group(1) level = int(match.group(2)) message = match.group(3) db.insert(dict( tstamp=datetime.now(),comp=component,lvl=level,msg=message)) except Exception, e: print e, packet
import redis import re from common import get_db from datetime import datetime MSGPATTERN = re.compile('^(\w+)\|(\d)\|([\s\S]*)$') CHANNEL = 'logfire' def listen(args): global MSGPATTERN rserver = redis.Redis('localhost') pubsub = rserver.pubsub() pubsub.subscribe(CHANNEL) db = get_db(args.mongohost) for packet in pubsub.listen(): try: if packet['type'] != 'message': continue match = MSGPATTERN.match(packet['data']) component = match.group(1) level = int(match.group(2)) message = match.group(3) db.insert(dict( tstamp=datetime.utcnow(),comp=component,lvl=level,msg=message)) except Exception, e: print e, packet
Make sure timestamp of log message is UTC when it goes into DB
Make sure timestamp of log message is UTC when it goes into DB
Python
mit
jay3sh/logfire,jay3sh/logfire
<REPLACE_OLD> tstamp=datetime.now(),comp=component,lvl=level,msg=message)) <REPLACE_NEW> tstamp=datetime.utcnow(),comp=component,lvl=level,msg=message)) <REPLACE_END> <|endoftext|> import redis import re from common import get_db from datetime import datetime MSGPATTERN = re.compile('^(\w+)\|(\d)\|([\s\S]*)$') CHANNEL = 'logfire' def listen(args): global MSGPATTERN rserver = redis.Redis('localhost') pubsub = rserver.pubsub() pubsub.subscribe(CHANNEL) db = get_db(args.mongohost) for packet in pubsub.listen(): try: if packet['type'] != 'message': continue match = MSGPATTERN.match(packet['data']) component = match.group(1) level = int(match.group(2)) message = match.group(3) db.insert(dict( tstamp=datetime.utcnow(),comp=component,lvl=level,msg=message)) except Exception, e: print e, packet
Make sure timestamp of log message is UTC when it goes into DB import redis import re from common import get_db from datetime import datetime MSGPATTERN = re.compile('^(\w+)\|(\d)\|([\s\S]*)$') CHANNEL = 'logfire' def listen(args): global MSGPATTERN rserver = redis.Redis('localhost') pubsub = rserver.pubsub() pubsub.subscribe(CHANNEL) db = get_db(args.mongohost) for packet in pubsub.listen(): try: if packet['type'] != 'message': continue match = MSGPATTERN.match(packet['data']) component = match.group(1) level = int(match.group(2)) message = match.group(3) db.insert(dict( tstamp=datetime.now(),comp=component,lvl=level,msg=message)) except Exception, e: print e, packet
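Annotation: datetime.utcnow() removes the local-clock skew but still yields a naive datetime. On Python 3 the usual hedge is an explicitly aware UTC timestamp; either form ends up stored as UTC by MongoDB drivers.

from datetime import datetime, timezone

naive = datetime.utcnow()           # what the patched code stores
aware = datetime.now(timezone.utc)  # carries tzinfo=UTC explicitly
print(naive.tzinfo, aware.tzinfo)   # None vs UTC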
f617c1ce192739594c161f717d5d04cc17ede22e
distarray/local/tests/paralleltest_io.py
distarray/local/tests/paralleltest_io.py
import tempfile import h5py from numpy.testing import assert_allclose from os import path from distarray.local import LocalArray, save, load, save_hdf5 from distarray.testing import comm_null_passes, MpiTestCase class TestFlatFileIO(MpiTestCase): @comm_null_passes def test_flat_file_read_write(self): larr0 = LocalArray((7,), comm=self.comm) output_dir = tempfile.gettempdir() filename = 'outfile' output_path = path.join(output_dir, filename) save(output_path, larr0) larr1 = load(output_path, comm=self.comm) self.assertTrue(isinstance(larr1, LocalArray)) assert_allclose(larr0, larr1) class TestHDF5FileIO(MpiTestCase): @comm_null_passes def test_hdf5_file_write(self): dataset = "data" larr0 = LocalArray((51,), comm=self.comm) output_dir = tempfile.gettempdir() filename = 'outfile' output_path = path.join(output_dir, filename) save_hdf5(output_path, larr0, dataset=dataset) self.assertTrue(path.exists(output_path)) fp = h5py.File(output_path, 'r') self.assertTrue("data" in fp) fp.close()
import tempfile from numpy.testing import assert_allclose from os import path from distarray.local import LocalArray, save, load, save_hdf5 from distarray.testing import comm_null_passes, MpiTestCase class TestFlatFileIO(MpiTestCase): @comm_null_passes def test_flat_file_read_write(self): larr0 = LocalArray((7,), comm=self.comm) output_dir = tempfile.gettempdir() filename = 'outfile' output_path = path.join(output_dir, filename) save(output_path, larr0) larr1 = load(output_path, comm=self.comm) self.assertTrue(isinstance(larr1, LocalArray)) assert_allclose(larr0, larr1) class TestHDF5FileIO(MpiTestCase): @comm_null_passes def test_hdf5_file_write(self): import h5py dataset = "data" larr0 = LocalArray((51,), comm=self.comm) output_dir = tempfile.gettempdir() filename = 'outfile' output_path = path.join(output_dir, filename) save_hdf5(output_path, larr0, dataset=dataset) self.assertTrue(path.exists(output_path)) fp = h5py.File(output_path, 'r') self.assertTrue("data" in fp) fp.close()
Move h5py import into HDF test.
Move h5py import into HDF test.
Python
bsd-3-clause
RaoUmer/distarray,enthought/distarray,RaoUmer/distarray,enthought/distarray
<REPLACE_OLD> tempfile import h5py from <REPLACE_NEW> tempfile from <REPLACE_END> <INSERT> import h5py <INSERT_END> <|endoftext|> import tempfile from numpy.testing import assert_allclose from os import path from distarray.local import LocalArray, save, load, save_hdf5 from distarray.testing import comm_null_passes, MpiTestCase class TestFlatFileIO(MpiTestCase): @comm_null_passes def test_flat_file_read_write(self): larr0 = LocalArray((7,), comm=self.comm) output_dir = tempfile.gettempdir() filename = 'outfile' output_path = path.join(output_dir, filename) save(output_path, larr0) larr1 = load(output_path, comm=self.comm) self.assertTrue(isinstance(larr1, LocalArray)) assert_allclose(larr0, larr1) class TestHDF5FileIO(MpiTestCase): @comm_null_passes def test_hdf5_file_write(self): import h5py dataset = "data" larr0 = LocalArray((51,), comm=self.comm) output_dir = tempfile.gettempdir() filename = 'outfile' output_path = path.join(output_dir, filename) save_hdf5(output_path, larr0, dataset=dataset) self.assertTrue(path.exists(output_path)) fp = h5py.File(output_path, 'r') self.assertTrue("data" in fp) fp.close()
Move h5py import into HDF test. import tempfile import h5py from numpy.testing import assert_allclose from os import path from distarray.local import LocalArray, save, load, save_hdf5 from distarray.testing import comm_null_passes, MpiTestCase class TestFlatFileIO(MpiTestCase): @comm_null_passes def test_flat_file_read_write(self): larr0 = LocalArray((7,), comm=self.comm) output_dir = tempfile.gettempdir() filename = 'outfile' output_path = path.join(output_dir, filename) save(output_path, larr0) larr1 = load(output_path, comm=self.comm) self.assertTrue(isinstance(larr1, LocalArray)) assert_allclose(larr0, larr1) class TestHDF5FileIO(MpiTestCase): @comm_null_passes def test_hdf5_file_write(self): dataset = "data" larr0 = LocalArray((51,), comm=self.comm) output_dir = tempfile.gettempdir() filename = 'outfile' output_path = path.join(output_dir, filename) save_hdf5(output_path, larr0, dataset=dataset) self.assertTrue(path.exists(output_path)) fp = h5py.File(output_path, 'r') self.assertTrue("data" in fp) fp.close()
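Annotation: moving the import inside the test keeps the module importable without h5py, but that test will then error rather than skip when h5py is missing. A common unittest pattern that degrades to a skip instead; this is a sketch, not code taken from distarray.

import unittest

try:
    import h5py  # noqa: F401
    HAS_H5PY = True
except ImportError:
    HAS_H5PY = False

@unittest.skipUnless(HAS_H5PY, "h5py is not installed")
class TestHDF5FileIO(unittest.TestCase):
    def test_hdf5_file_write(self):
        import h5py  # safe: the whole class is skipped when h5py is absent
        self.assertTrue(hasattr(h5py, "File"))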
0461fad1a3d81aa2d937a1734f1ebb07b3e81d79
undercloud_heat_plugins/server_update_allowed.py
undercloud_heat_plugins/server_update_allowed.py
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed}
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() def needs_replace_with_prop_diff(self, changed_properties_set, after_props, before_props): return False def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed}
Fix no-replace-server to accurately preview update
Fix no-replace-server to accurately preview update This override of OS::Nova::Server needs to reflect the fact that it never replaces on update or the update --dry-run output ends up being wrong. Closes-Bug: 1561076 Change-Id: I9256872b877fbe7f91befb52995c62de006210ef
Python
apache-2.0
openstack/tripleo-common,openstack/tripleo-common
<REPLACE_OLD> server.Server.properties_schema.keys() def <REPLACE_NEW> server.Server.properties_schema.keys() def needs_replace_with_prop_diff(self, changed_properties_set, after_props, before_props): return False def <REPLACE_END> <|endoftext|> # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() def needs_replace_with_prop_diff(self, changed_properties_set, after_props, before_props): return False def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed}
Fix no-replace-server to accurately preview update This override of OS::Nova::Server needs to reflect the fact that it never replaces on update or the update --dry-run output ends up being wrong. Closes-Bug: 1561076 Change-Id: I9256872b877fbe7f91befb52995c62de006210ef # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from heat.engine.resources.openstack.nova import server class ServerUpdateAllowed(server.Server): '''Prevent any properties changes from replacing an existing server. ''' update_allowed_properties = server.Server.properties_schema.keys() def resource_mapping(): return {'OS::Nova::Server': ServerUpdateAllowed}
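Annotation: needs_replace_with_prop_diff is the hook Heat consults when a property diff might force resource replacement; returning False is what makes `update --dry-run` report an in-place update. A dependency-free sketch of the override pattern (the base behaviour below is a stand-in for the engine's default, not Heat's actual code):

class Server:
    def needs_replace_with_prop_diff(self, changed_properties_set,
                                     after_props, before_props):
        return bool(changed_properties_set)  # stand-in: any diff may replace

class ServerUpdateAllowed(Server):
    def needs_replace_with_prop_diff(self, changed_properties_set,
                                     after_props, before_props):
        return False  # a property diff never forces replacement

assert Server().needs_replace_with_prop_diff({'flavor'}, {}, {}) is True
assert ServerUpdateAllowed().needs_replace_with_prop_diff({'flavor'}, {}, {}) is False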
e4869e9e348dd5a0441edc0aca7c0298c2c892a9
mopidy/backends/dummy.py
mopidy/backends/dummy.py
from __future__ import unicode_literals # Make classes previously residing here available in the old location for # backwards compatibility with extensions targeting Mopidy < 0.18. from mopidy.backend.dummy import * # noqa
Make old DummyBackend imports work
backend: Make old DummyBackend imports work
Python
apache-2.0
glogiotatidis/mopidy,vrs01/mopidy,mokieyue/mopidy,quartz55/mopidy,jmarsik/mopidy,diandiankan/mopidy,jmarsik/mopidy,dbrgn/mopidy,bencevans/mopidy,mopidy/mopidy,bencevans/mopidy,mokieyue/mopidy,tkem/mopidy,hkariti/mopidy,bencevans/mopidy,glogiotatidis/mopidy,swak/mopidy,ZenithDK/mopidy,kingosticks/mopidy,diandiankan/mopidy,abarisain/mopidy,vrs01/mopidy,jodal/mopidy,diandiankan/mopidy,jodal/mopidy,kingosticks/mopidy,mopidy/mopidy,hkariti/mopidy,pacificIT/mopidy,bacontext/mopidy,jmarsik/mopidy,ali/mopidy,ZenithDK/mopidy,ali/mopidy,jcass77/mopidy,bacontext/mopidy,rawdlite/mopidy,dbrgn/mopidy,adamcik/mopidy,pacificIT/mopidy,jodal/mopidy,bacontext/mopidy,tkem/mopidy,adamcik/mopidy,ZenithDK/mopidy,glogiotatidis/mopidy,jcass77/mopidy,quartz55/mopidy,dbrgn/mopidy,pacificIT/mopidy,SuperStarPL/mopidy,SuperStarPL/mopidy,bencevans/mopidy,hkariti/mopidy,diandiankan/mopidy,glogiotatidis/mopidy,jcass77/mopidy,hkariti/mopidy,ali/mopidy,dbrgn/mopidy,liamw9534/mopidy,tkem/mopidy,pacificIT/mopidy,quartz55/mopidy,bacontext/mopidy,woutervanwijk/mopidy,adamcik/mopidy,jmarsik/mopidy,rawdlite/mopidy,swak/mopidy,rawdlite/mopidy,ZenithDK/mopidy,mokieyue/mopidy,rawdlite/mopidy,priestd09/mopidy,SuperStarPL/mopidy,tkem/mopidy,swak/mopidy,priestd09/mopidy,SuperStarPL/mopidy,priestd09/mopidy,kingosticks/mopidy,woutervanwijk/mopidy,quartz55/mopidy,liamw9534/mopidy,swak/mopidy,mokieyue/mopidy,vrs01/mopidy,mopidy/mopidy,vrs01/mopidy,abarisain/mopidy,ali/mopidy
<INSERT> from __future__ import unicode_literals # Make classes previously residing here available in the old location for # backwards compatibility with extensions targeting Mopidy < 0.18. from mopidy.backend.dummy import * <INSERT_END> <INSERT> # noqa <INSERT_END> <|endoftext|> from __future__ import unicode_literals # Make classes previously residing here available in the old location for # backwards compatibility with extensions targeting Mopidy < 0.18. from mopidy.backend.dummy import * # noqa
backend: Make old DummyBackend imports work
80529d5032b6728adcaad426310c30b5e6366ad4
solution.py
solution.py
class Kiosk(): def __init__(self, visit_cost, location): self.visit_cost = visit_cost self.location = location print 'initializing Kiosk' #patient shold be Person def visit(self, patient): if not patient.location == self.location: print 'patient not in correct location' return False if not patient.money>self.visit_cost: print 'patient cannot afford treatment' #patient should be Person def visit(self, patient): patient.money -= visit_cost #improve patient.diabetes #improve patient.cardio return True #Patient should be from class Person def filter(self, patient): if not patient.location == self.location: print "patient not at proper location" return False if not patient.money>self.visit_cost: print "patient cannot afford treatment" return False visit(self,patient)
class Kiosk(): def __init__(self, location, visit_cost, diabetes_threshold, cardio_threshold): self.location = location self.visit_cost = visit_cost self.diabetes_threshold = diabetes_threshold self.cardio_threshold = cardio_threshold #Initial cost to create kiosk: $5000. We are measuring in rupees self.money = -309900 print 'initializing Kiosk' #patient shold be Person def visit(self, patient): if not patient.location == self.location: print 'patient not in correct location' return False if not patient.money>self.visit_cost: print 'patient cannot afford treatment' patient.money -= visit_cost kiosk.money += visit_cost #If we diagnose diabetes if patient.diabetes<diabetes_threshold: #For now, we ignore the details and just improve the patient's #health. patient.diabetes = diabetes_threshold #If we diagnose cardiovascular problems if patient.cardio<cardio_threshold: #For now, we ignore the details and just improve the patient's #health. patient.cardio = cardio_threshold #The visit was successful return True
Clean up and finish Kiosk class
Clean up and finish Kiosk class There was some redundancy because I merged it poorly
Python
bsd-3-clause
rkawauchi/IHK,rkawauchi/IHK
<INSERT> location, <INSERT_END> <REPLACE_OLD> location): <REPLACE_NEW> diabetes_threshold, cardio_threshold): self.location = location <REPLACE_END> <REPLACE_OLD> self.location = location <REPLACE_NEW> self.diabetes_threshold = diabetes_threshold self.cardio_threshold = cardio_threshold #Initial cost to create kiosk: $5000. We are measuring in rupees self.money = -309900 <REPLACE_END> <REPLACE_OLD> treatment' #patient should be Person def visit(self, patient): <REPLACE_NEW> treatment' <REPLACE_END> <REPLACE_OLD> #improve patient.diabetes #improve patient.cardio <REPLACE_NEW> kiosk.money += visit_cost #If we diagnose diabetes if patient.diabetes<diabetes_threshold: #For now, we ignore the details and just improve the patient's #health. patient.diabetes = diabetes_threshold #If we diagnose cardiovascular problems if patient.cardio<cardio_threshold: #For now, we ignore the details and just improve the patient's #health. patient.cardio = cardio_threshold #The visit was successful <REPLACE_END> <REPLACE_OLD> True #Patient should be from class Person def filter(self, patient): if not patient.location == self.location: print "patient not at proper location" return False if not patient.money>self.visit_cost: print "patient cannot afford treatment" return False visit(self,patient) <REPLACE_NEW> True <REPLACE_END> <|endoftext|> class Kiosk(): def __init__(self, location, visit_cost, diabetes_threshold, cardio_threshold): self.location = location self.visit_cost = visit_cost self.diabetes_threshold = diabetes_threshold self.cardio_threshold = cardio_threshold #Initial cost to create kiosk: $5000. We are measuring in rupees self.money = -309900 print 'initializing Kiosk' #patient shold be Person def visit(self, patient): if not patient.location == self.location: print 'patient not in correct location' return False if not patient.money>self.visit_cost: print 'patient cannot afford treatment' patient.money -= visit_cost kiosk.money += visit_cost #If we diagnose diabetes if patient.diabetes<diabetes_threshold: #For now, we ignore the details and just improve the patient's #health. patient.diabetes = diabetes_threshold #If we diagnose cardiovascular problems if patient.cardio<cardio_threshold: #For now, we ignore the details and just improve the patient's #health. patient.cardio = cardio_threshold #The visit was successful return True
Clean up and finish Kiosk class There was some redundancy because I merged it poorly class Kiosk(): def __init__(self, visit_cost, location): self.visit_cost = visit_cost self.location = location print 'initializing Kiosk' #patient shold be Person def visit(self, patient): if not patient.location == self.location: print 'patient not in correct location' return False if not patient.money>self.visit_cost: print 'patient cannot afford treatment' #patient should be Person def visit(self, patient): patient.money -= visit_cost #improve patient.diabetes #improve patient.cardio return True #Patient should be from class Person def filter(self, patient): if not patient.location == self.location: print "patient not at proper location" return False if not patient.money>self.visit_cost: print "patient cannot afford treatment" return False visit(self,patient)
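Annotation: even after this cleanup, visit() still reads the bare name visit_cost and an undefined global kiosk, so it would raise NameError at runtime. A hedged Python 3 sketch of the same logic on instance attributes; the patient fields and numbers are invented for the demo.

from types import SimpleNamespace

class Kiosk(object):
    def __init__(self, location, visit_cost, diabetes_threshold, cardio_threshold):
        self.location = location
        self.visit_cost = visit_cost
        self.diabetes_threshold = diabetes_threshold
        self.cardio_threshold = cardio_threshold
        self.money = -309900  # initial build cost, in rupees as above

    def visit(self, patient):
        if patient.location != self.location:
            return False  # patient not in the right place
        if patient.money <= self.visit_cost:
            return False  # patient cannot afford treatment
        patient.money -= self.visit_cost
        self.money += self.visit_cost  # was the undefined global `kiosk`
        # "improving" health here means raising a reading up to the threshold
        patient.diabetes = max(patient.diabetes, self.diabetes_threshold)
        patient.cardio = max(patient.cardio, self.cardio_threshold)
        return True

patient = SimpleNamespace(location='village', money=500, diabetes=0.4, cardio=0.9)
kiosk = Kiosk('village', 100, 0.7, 0.8)
assert kiosk.visit(patient) and patient.money == 400 and patient.diabetes == 0.7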
dfdeaf536466cfa8003af4cd5341d1d7127ea6b7
py/_test_py2go.py
py/_test_py2go.py
#!/usr/bin/env python import datetime def return_true(): return True def return_false(): return False def return_int(): return 123 def return_float(): return 1.0 def return_string(): return "ABC" def return_bytearray(): return bytearray('abcdefg') def return_array(): return [1, 2, {"key": 3}] def return_map(): return {"key1": 123, "key2": "str"} def return_nested_map(): return {"key1": {"key2": 123}} def return_none(): return None def return_timestamp(): return datetime.datetime(2015, 4, 1, 14, 27, 0, 500*1000, None)
#!/usr/bin/env python import datetime def return_true(): return True def return_false(): return False def return_int(): return 123 def return_float(): return 1.0 def return_string(): return "ABC" def return_bytearray(): return bytearray('abcdefg') def return_array(): return [1, 2, {"key": 3}] def return_map(): return {"key1": 123, "key2": "str"} def return_nested_map(): return {"key1": {"key2": 123}} def return_none(): return None def return_timestamp(): return datetime.datetime(2015, 4, 1, 14, 27, 0, 500*1000, None)
Update python script for pep8 style
Update python script for pep8 style
Python
mit
sensorbee/py,sensorbee/py
<REPLACE_OLD> datetime def <REPLACE_NEW> datetime def <REPLACE_END> <REPLACE_OLD> True def <REPLACE_NEW> True def <REPLACE_END> <REPLACE_OLD> False def <REPLACE_NEW> False def <REPLACE_END> <REPLACE_OLD> 123 def <REPLACE_NEW> 123 def <REPLACE_END> <REPLACE_OLD> 1.0 def <REPLACE_NEW> 1.0 def <REPLACE_END> <REPLACE_OLD> "ABC" def <REPLACE_NEW> "ABC" def <REPLACE_END> <REPLACE_OLD> bytearray('abcdefg') def <REPLACE_NEW> bytearray('abcdefg') def <REPLACE_END> <REPLACE_OLD> 3}] def <REPLACE_NEW> 3}] def <REPLACE_END> <REPLACE_OLD> "str"} def <REPLACE_NEW> "str"} def <REPLACE_END> <REPLACE_OLD> 123}} def <REPLACE_NEW> 123}} def <REPLACE_END> <REPLACE_OLD> None def <REPLACE_NEW> None def <REPLACE_END> <|endoftext|> #!/usr/bin/env python import datetime def return_true(): return True def return_false(): return False def return_int(): return 123 def return_float(): return 1.0 def return_string(): return "ABC" def return_bytearray(): return bytearray('abcdefg') def return_array(): return [1, 2, {"key": 3}] def return_map(): return {"key1": 123, "key2": "str"} def return_nested_map(): return {"key1": {"key2": 123}} def return_none(): return None def return_timestamp(): return datetime.datetime(2015, 4, 1, 14, 27, 0, 500*1000, None)
Update python script for pep8 style #!/usr/bin/env python import datetime def return_true(): return True def return_false(): return False def return_int(): return 123 def return_float(): return 1.0 def return_string(): return "ABC" def return_bytearray(): return bytearray('abcdefg') def return_array(): return [1, 2, {"key": 3}] def return_map(): return {"key1": 123, "key2": "str"} def return_nested_map(): return {"key1": {"key2": 123}} def return_none(): return None def return_timestamp(): return datetime.datetime(2015, 4, 1, 14, 27, 0, 500*1000, None)
b3540f744efbcb0f14f9b4081aeffda1f5ccae3c
pyscraper/patchfilter.py
pyscraper/patchfilter.py
#! /usr/bin/python2.3
# vim:sw=8:ts=8:et:nowrap

import os
import shutil

def ApplyPatches(filein, fileout):
        # Generate short name such as wrans/answers2003-03-31.html
        (rest, name) = os.path.split(filein)
        (rest, dir) = os.path.split(rest)
        fileshort = os.path.join(dir, name)

        # Look for a patch file from our collection (which is
        # in the pyscraper/patches folder in Public Whip CVS)
        patchfile = os.path.join("patches", fileshort + ".patch")
        if not os.path.isfile(patchfile):
                return False

        while True:
                # Apply the patch
                shutil.copyfile(filein, fileout)
                # delete temporary file that might have been created by a previous patch failure
                filoutorg = fileout + ".orig"
                if os.path.isfile(filoutorg):
                        os.remove(filoutorg)
                status = os.system("patch --quiet %s <%s" % (fileout, patchfile))
                if status == 0:
                        return True
                print "Error running 'patch' on file %s, blanking it out" % fileshort
                os.rename(patchfile, patchfile + ".old~")
                blankfile = open(patchfile, "w")
                blankfile.close()
#! /usr/bin/python2.3
# vim:sw=8:ts=8:et:nowrap

import os
import shutil

def ApplyPatches(filein, fileout):
        # Generate short name such as wrans/answers2003-03-31.html
        (rest, name) = os.path.split(filein)
        (rest, dir) = os.path.split(rest)
        fileshort = os.path.join(dir, name)

        # Look for a patch file from our collection (which is
        # in the pyscraper/patches folder in Public Whip CVS)
        patchfile = os.path.join("patches", fileshort + ".patch")
        if not os.path.isfile(patchfile):
                return False

        while True:
                # Apply the patch
                shutil.copyfile(filein, fileout)
                # delete temporary file that might have been created by a previous patch failure
                filoutorg = fileout + ".orig"
                if os.path.isfile(filoutorg):
                        os.remove(filoutorg)
                status = os.system("patch --quiet %s <%s" % (fileout, patchfile))
                if status == 0:
                        return True
                raise Exception, "Error running 'patch' on file %s" % fileshort
                #print "blanking out %s" % fileshort
                #os.rename(patchfile, patchfile + ".old~")
                #blankfile = open(patchfile, "w")
                #blankfile.close()
Remove code which blanks patch files
Remove code which blanks patch files
Python
agpl-3.0
mysociety/publicwhip,mysociety/publicwhip,mysociety/publicwhip
<REPLACE_OLD> print <REPLACE_NEW> raise Exception, <REPLACE_END>
<REPLACE_OLD> %s, blanking it out" <REPLACE_NEW> %s" % fileshort
                #print "blanking out %s" <REPLACE_END>
<REPLACE_OLD> os.rename(patchfile, <REPLACE_NEW> #os.rename(patchfile, <REPLACE_END>
<REPLACE_OLD> blankfile <REPLACE_NEW> #blankfile <REPLACE_END>
<REPLACE_OLD> blankfile.close() <REPLACE_NEW> #blankfile.close() <REPLACE_END>
<|endoftext|>
#! /usr/bin/python2.3
# vim:sw=8:ts=8:et:nowrap

import os
import shutil

def ApplyPatches(filein, fileout):
        # Generate short name such as wrans/answers2003-03-31.html
        (rest, name) = os.path.split(filein)
        (rest, dir) = os.path.split(rest)
        fileshort = os.path.join(dir, name)

        # Look for a patch file from our collection (which is
        # in the pyscraper/patches folder in Public Whip CVS)
        patchfile = os.path.join("patches", fileshort + ".patch")
        if not os.path.isfile(patchfile):
                return False

        while True:
                # Apply the patch
                shutil.copyfile(filein, fileout)
                # delete temporary file that might have been created by a previous patch failure
                filoutorg = fileout + ".orig"
                if os.path.isfile(filoutorg):
                        os.remove(filoutorg)
                status = os.system("patch --quiet %s <%s" % (fileout, patchfile))
                if status == 0:
                        return True
                raise Exception, "Error running 'patch' on file %s" % fileshort
                #print "blanking out %s" % fileshort
                #os.rename(patchfile, patchfile + ".old~")
                #blankfile = open(patchfile, "w")
                #blankfile.close()
Remove code which blanks patch files
#! /usr/bin/python2.3
# vim:sw=8:ts=8:et:nowrap

import os
import shutil

def ApplyPatches(filein, fileout):
        # Generate short name such as wrans/answers2003-03-31.html
        (rest, name) = os.path.split(filein)
        (rest, dir) = os.path.split(rest)
        fileshort = os.path.join(dir, name)

        # Look for a patch file from our collection (which is
        # in the pyscraper/patches folder in Public Whip CVS)
        patchfile = os.path.join("patches", fileshort + ".patch")
        if not os.path.isfile(patchfile):
                return False

        while True:
                # Apply the patch
                shutil.copyfile(filein, fileout)
                # delete temporary file that might have been created by a previous patch failure
                filoutorg = fileout + ".orig"
                if os.path.isfile(filoutorg):
                        os.remove(filoutorg)
                status = os.system("patch --quiet %s <%s" % (fileout, patchfile))
                if status == 0:
                        return True
                print "Error running 'patch' on file %s, blanking it out" % fileshort
                os.rename(patchfile, patchfile + ".old~")
                blankfile = open(patchfile, "w")
                blankfile.close()
8aea526176592511581ddbeb6f3bb96ce072cc91
wukong/__init__.py
wukong/__init__.py
# Set up a null roothandler for our logging system
import logging

try:
    # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())
# Set up a null roothandler for our logging system
import logging

from logging import NullHandler

logging.getLogger(__name__).addHandler(NullHandler())
Remove the NullHandler patch because we don't support any python versions that need it
Remove the NullHandler patch because we don't support any python versions that need it
Python
mit
SurveyMonkey/wukong
<REPLACE_OLD> logging

try:
    # Python 2.7+
    from <REPLACE_NEW> logging

from <REPLACE_END>
<REPLACE_OLD> NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler()) <REPLACE_NEW> NullHandler

logging.getLogger(__name__).addHandler(NullHandler()) <REPLACE_END>
<|endoftext|>
# Set up a null roothandler for our logging system
import logging

from logging import NullHandler

logging.getLogger(__name__).addHandler(NullHandler())
Remove the NullHandler patch because we don't support any python versions that need it
# Set up a null roothandler for our logging system
import logging

try:
    # Python 2.7+
    from logging import NullHandler
except ImportError:
    class NullHandler(logging.Handler):
        def emit(self, record):
            pass

logging.getLogger(__name__).addHandler(NullHandler())
e249e1c03fab60c2f09a171924f3a1f701a0c7aa
astropy/tests/image_tests.py
astropy/tests/image_tests.py
import matplotlib
from matplotlib import pyplot as plt

from ..utils.decorators import wraps

MPL_VERSION = matplotlib.__version__

ROOT = "http://{server}/testing/astropy/2018-02-01T23:31:45.013149/{mpl_version}/"

IMAGE_REFERENCE_DIR = ROOT.format(server='data.astropy.org',
                                  mpl_version=MPL_VERSION[:3] + '.x')


def ignore_matplotlibrc(func):
    # This is a decorator for tests that use matplotlib but not pytest-mpl
    # (which already handles rcParams)
    @wraps(func)
    def wrapper(*args, **kwargs):
        with plt.style.context({}, after_reset=True):
            return func(*args, **kwargs)
    return wrapper
import matplotlib
from matplotlib import pyplot as plt

from ..utils.decorators import wraps

MPL_VERSION = matplotlib.__version__

ROOT = "http://{server}/testing/astropy/2018-02-01T23:31:45.013149/{mpl_version}/"

IMAGE_REFERENCE_DIR = (ROOT.format(server='data.astropy.org',
                                   mpl_version=MPL_VERSION[:3] + '.x') + ',' +
                       ROOT.format(server='www.astropy.org/astropy-data',
                                   mpl_version=MPL_VERSION[:3] + '.x'))


def ignore_matplotlibrc(func):
    # This is a decorator for tests that use matplotlib but not pytest-mpl
    # (which already handles rcParams)
    @wraps(func)
    def wrapper(*args, **kwargs):
        with plt.style.context({}, after_reset=True):
            return func(*args, **kwargs)
    return wrapper
Add back mirror for image tests
Add back mirror for image tests
Python
bsd-3-clause
pllim/astropy,stargaser/astropy,MSeifert04/astropy,funbaker/astropy,larrybradley/astropy,astropy/astropy,pllim/astropy,lpsinger/astropy,saimn/astropy,MSeifert04/astropy,mhvk/astropy,dhomeier/astropy,aleksandr-bakanov/astropy,StuartLittlefair/astropy,bsipocz/astropy,astropy/astropy,lpsinger/astropy,bsipocz/astropy,DougBurke/astropy,saimn/astropy,aleksandr-bakanov/astropy,pllim/astropy,bsipocz/astropy,bsipocz/astropy,funbaker/astropy,MSeifert04/astropy,funbaker/astropy,funbaker/astropy,StuartLittlefair/astropy,StuartLittlefair/astropy,astropy/astropy,mhvk/astropy,larrybradley/astropy,pllim/astropy,stargaser/astropy,stargaser/astropy,mhvk/astropy,DougBurke/astropy,astropy/astropy,saimn/astropy,lpsinger/astropy,lpsinger/astropy,dhomeier/astropy,saimn/astropy,mhvk/astropy,DougBurke/astropy,larrybradley/astropy,aleksandr-bakanov/astropy,dhomeier/astropy,saimn/astropy,larrybradley/astropy,dhomeier/astropy,DougBurke/astropy,stargaser/astropy,dhomeier/astropy,StuartLittlefair/astropy,aleksandr-bakanov/astropy,astropy/astropy,mhvk/astropy,larrybradley/astropy,StuartLittlefair/astropy,lpsinger/astropy,MSeifert04/astropy,pllim/astropy
<REPLACE_OLD> ROOT.format(server='data.astropy.org', <REPLACE_NEW> (ROOT.format(server='data.astropy.org', <REPLACE_END>
<REPLACE_OLD> '.x')


def <REPLACE_NEW> '.x') + ',' +
                       ROOT.format(server='www.astropy.org/astropy-data',
                                   mpl_version=MPL_VERSION[:3] + '.x'))


def <REPLACE_END>
<|endoftext|>
import matplotlib
from matplotlib import pyplot as plt

from ..utils.decorators import wraps

MPL_VERSION = matplotlib.__version__

ROOT = "http://{server}/testing/astropy/2018-02-01T23:31:45.013149/{mpl_version}/"

IMAGE_REFERENCE_DIR = (ROOT.format(server='data.astropy.org',
                                   mpl_version=MPL_VERSION[:3] + '.x') + ',' +
                       ROOT.format(server='www.astropy.org/astropy-data',
                                   mpl_version=MPL_VERSION[:3] + '.x'))


def ignore_matplotlibrc(func):
    # This is a decorator for tests that use matplotlib but not pytest-mpl
    # (which already handles rcParams)
    @wraps(func)
    def wrapper(*args, **kwargs):
        with plt.style.context({}, after_reset=True):
            return func(*args, **kwargs)
    return wrapper
Add back mirror for image tests
import matplotlib
from matplotlib import pyplot as plt

from ..utils.decorators import wraps

MPL_VERSION = matplotlib.__version__

ROOT = "http://{server}/testing/astropy/2018-02-01T23:31:45.013149/{mpl_version}/"

IMAGE_REFERENCE_DIR = ROOT.format(server='data.astropy.org',
                                  mpl_version=MPL_VERSION[:3] + '.x')


def ignore_matplotlibrc(func):
    # This is a decorator for tests that use matplotlib but not pytest-mpl
    # (which already handles rcParams)
    @wraps(func)
    def wrapper(*args, **kwargs):
        with plt.style.context({}, after_reset=True):
            return func(*args, **kwargs)
    return wrapper
12bbc7e10ae52328feb766e2bed5f5f20fa0d354
pyramid_es/__init__.py
pyramid_es/__init__.py
from pyramid.settings import asbool

from .client import ElasticClient


def client_from_config(settings, prefix='elastic.'):
    """
    Instantiate and configure an Elasticsearch from settings.

    In typical Pyramid usage, you shouldn't use this directly: instead,
    just include ``pyramid_es`` and use the :py:func:`get_client` function
    to get access to the shared :py:class:`.client.ElasticClient` instance.
    """
    return ElasticClient(
        servers=settings.get(prefix + 'servers', ['localhost:9200']),
        timeout=settings.get(prefix + 'timeout', 1.0),
        index=settings[prefix + 'index'],
        use_transaction=asbool(settings.get(prefix + 'use_transaction', True)),
        disable_indexing=settings.get(prefix + 'disable_indexing', False))


def includeme(config):
    registry = config.registry
    settings = registry.settings

    client = client_from_config(settings)
    client.ensure_index()

    registry.pyramid_es_client = client


def get_client(request):
    """
    Get the registered Elasticsearch client. The supplied argument can be
    either a ``Request`` instance or a ``Registry``.
    """
    registry = getattr(request, 'registry', None)
    if registry is None:
        registry = request
    return registry.pyramid_es_client
from pyramid.settings import asbool

from .client import ElasticClient


def client_from_config(settings, prefix='elastic.'):
    """
    Instantiate and configure an Elasticsearch from settings.

    In typical Pyramid usage, you shouldn't use this directly: instead,
    just include ``pyramid_es`` and use the :py:func:`get_client` function
    to get access to the shared :py:class:`.client.ElasticClient` instance.
    """
    return ElasticClient(
        servers=settings.get(prefix + 'servers', ['localhost:9200']),
        timeout=settings.get(prefix + 'timeout', 1.0),
        index=settings[prefix + 'index'],
        use_transaction=asbool(settings.get(prefix + 'use_transaction', True)),
        disable_indexing=settings.get(prefix + 'disable_indexing', False))


def includeme(config):
    registry = config.registry
    settings = registry.settings

    client = client_from_config(settings)
    if asbool(settings.get('elastic.ensure_index_on_start')):
        client.ensure_index()

    registry.pyramid_es_client = client


def get_client(request):
    """
    Get the registered Elasticsearch client. The supplied argument can be
    either a ``Request`` instance or a ``Registry``.
    """
    registry = getattr(request, 'registry', None)
    if registry is None:
        registry = request
    return registry.pyramid_es_client
Add a settings key to ensure index at start
Add a settings key to ensure index at start
Python
mit
storborg/pyramid_es
<INSERT> if asbool(settings.get('elastic.ensure_index_on_start')):
    <INSERT_END>
<|endoftext|>
from pyramid.settings import asbool

from .client import ElasticClient


def client_from_config(settings, prefix='elastic.'):
    """
    Instantiate and configure an Elasticsearch from settings.

    In typical Pyramid usage, you shouldn't use this directly: instead,
    just include ``pyramid_es`` and use the :py:func:`get_client` function
    to get access to the shared :py:class:`.client.ElasticClient` instance.
    """
    return ElasticClient(
        servers=settings.get(prefix + 'servers', ['localhost:9200']),
        timeout=settings.get(prefix + 'timeout', 1.0),
        index=settings[prefix + 'index'],
        use_transaction=asbool(settings.get(prefix + 'use_transaction', True)),
        disable_indexing=settings.get(prefix + 'disable_indexing', False))


def includeme(config):
    registry = config.registry
    settings = registry.settings

    client = client_from_config(settings)
    if asbool(settings.get('elastic.ensure_index_on_start')):
        client.ensure_index()

    registry.pyramid_es_client = client


def get_client(request):
    """
    Get the registered Elasticsearch client. The supplied argument can be
    either a ``Request`` instance or a ``Registry``.
    """
    registry = getattr(request, 'registry', None)
    if registry is None:
        registry = request
    return registry.pyramid_es_client
Add a settings key to ensure index at start
from pyramid.settings import asbool

from .client import ElasticClient


def client_from_config(settings, prefix='elastic.'):
    """
    Instantiate and configure an Elasticsearch from settings.

    In typical Pyramid usage, you shouldn't use this directly: instead,
    just include ``pyramid_es`` and use the :py:func:`get_client` function
    to get access to the shared :py:class:`.client.ElasticClient` instance.
    """
    return ElasticClient(
        servers=settings.get(prefix + 'servers', ['localhost:9200']),
        timeout=settings.get(prefix + 'timeout', 1.0),
        index=settings[prefix + 'index'],
        use_transaction=asbool(settings.get(prefix + 'use_transaction', True)),
        disable_indexing=settings.get(prefix + 'disable_indexing', False))


def includeme(config):
    registry = config.registry
    settings = registry.settings

    client = client_from_config(settings)
    client.ensure_index()

    registry.pyramid_es_client = client


def get_client(request):
    """
    Get the registered Elasticsearch client. The supplied argument can be
    either a ``Request`` instance or a ``Registry``.
    """
    registry = getattr(request, 'registry', None)
    if registry is None:
        registry = request
    return registry.pyramid_es_client
186e8424a16188825210b4c05656ae3b8fcc28c2
provpy/provpyexample_Elements.py
provpy/provpyexample_Elements.py
import json
from provpy import *

# Define your namespaces (see provpyexample_PROVURIRef_PROVNamespace.py)
FOAF = PROVNamespace("http://xmlns.com/foaf/0.1/")
ex = PROVNamespace("http://www.example.com/")
dcterms = PROVNamespace("http://purl.org/dc/terms/")
xsd = PROVNamespace('http://www.w3.org/2001/XMLSchema-datatypes#')
prov = PROVNamespace("http://www.w3.org/ns/prov-dm/")

# create a provenance container
examplegraph = PROVContainer()

# Set the default namespace name
examplegraph.set_default_namespace("http://www.example.com/")

#add the other namespaces into the container
examplegraph.add_namespace("dcterms","http://purl.org/dc/terms/")
examplegraph.add_namespace("foaf","http://xmlns.com/foaf/0.1/")

# add entities, first define the attributes in a dictionary
attrdict = {"type": "File", ex["path"]: "/shared/crime.txt", ex["creator"]: "Alice"}
# then create the entity
# If you give the id as a string, it will be treated as a localname
# under the default namespace
e0 = Entity(id="e0",attributes=attrdict)
# you can then add the entity into the provenance container
examplegraph.add(e0)

# define the attributes for the next entity
lit0 = PROVLiteral("2011-11-16T16:06:00",xsd["dateTime"])
attrdict ={prov["type"]: ex["File"], ex["path"]: "/shared/crime.txt", dcterms["creator"]: FOAF['Alice'], ex["content"]: "", dcterms["create"]: lit0}
# create the entity, note this time we give the id as a PROVURIRef
e1 = Entity(FOAF['Foo'],attributes=attrdict)
examplegraph.add(e1)

# add activities
# You can give the attributes during the creation if there are not many
a0 = Activity(id=None,starttime=datetime.datetime(2008, 7, 6, 5, 4, 3),attributes={prov["recipeLink"]: ex["create-file"]})
examplegraph.add(a0)

# You can have the JSON of the container with the to_provJSON() function
# as container is also a provenance record
print json.dumps(examplegraph.to_provJSON(),indent=4)
Add an example file demonstrating defining, adding and serializing prov elements in a prov container
Add an example file demonstrating defining, adding and serializing prov elements in a prov container
Python
mit
satra/prov,krischer/prov,trungdong/prov,krischer/prov
<REPLACE_OLD> <REPLACE_NEW> import json
from provpy import *

# Define your namespaces (see provpyexample_PROVURIRef_PROVNamespace.py)
FOAF = PROVNamespace("http://xmlns.com/foaf/0.1/")
ex = PROVNamespace("http://www.example.com/")
dcterms = PROVNamespace("http://purl.org/dc/terms/")
xsd = PROVNamespace('http://www.w3.org/2001/XMLSchema-datatypes#')
prov = PROVNamespace("http://www.w3.org/ns/prov-dm/")

# create a provenance container
examplegraph = PROVContainer()

# Set the default namespace name
examplegraph.set_default_namespace("http://www.example.com/")

#add the other namespaces into the container
examplegraph.add_namespace("dcterms","http://purl.org/dc/terms/")
examplegraph.add_namespace("foaf","http://xmlns.com/foaf/0.1/")

# add entities, first define the attributes in a dictionary
attrdict = {"type": "File", ex["path"]: "/shared/crime.txt", ex["creator"]: "Alice"}
# then create the entity
# If you give the id as a string, it will be treated as a localname
# under the default namespace
e0 = Entity(id="e0",attributes=attrdict)
# you can then add the entity into the provenance container
examplegraph.add(e0)

# define the attributes for the next entity
lit0 = PROVLiteral("2011-11-16T16:06:00",xsd["dateTime"])
attrdict ={prov["type"]: ex["File"], ex["path"]: "/shared/crime.txt", dcterms["creator"]: FOAF['Alice'], ex["content"]: "", dcterms["create"]: lit0}
# create the entity, note this time we give the id as a PROVURIRef
e1 = Entity(FOAF['Foo'],attributes=attrdict)
examplegraph.add(e1)

# add activities
# You can give the attributes during the creation if there are not many
a0 = Activity(id=None,starttime=datetime.datetime(2008, 7, 6, 5, 4, 3),attributes={prov["recipeLink"]: ex["create-file"]})
examplegraph.add(a0)

# You can have the JSON of the container with the to_provJSON() function
# as container is also a provenance record
print json.dumps(examplegraph.to_provJSON(),indent=4) <REPLACE_END>
<|endoftext|>
import json
from provpy import *

# Define your namespaces (see provpyexample_PROVURIRef_PROVNamespace.py)
FOAF = PROVNamespace("http://xmlns.com/foaf/0.1/")
ex = PROVNamespace("http://www.example.com/")
dcterms = PROVNamespace("http://purl.org/dc/terms/")
xsd = PROVNamespace('http://www.w3.org/2001/XMLSchema-datatypes#')
prov = PROVNamespace("http://www.w3.org/ns/prov-dm/")

# create a provenance container
examplegraph = PROVContainer()

# Set the default namespace name
examplegraph.set_default_namespace("http://www.example.com/")

#add the other namespaces into the container
examplegraph.add_namespace("dcterms","http://purl.org/dc/terms/")
examplegraph.add_namespace("foaf","http://xmlns.com/foaf/0.1/")

# add entities, first define the attributes in a dictionary
attrdict = {"type": "File", ex["path"]: "/shared/crime.txt", ex["creator"]: "Alice"}
# then create the entity
# If you give the id as a string, it will be treated as a localname
# under the default namespace
e0 = Entity(id="e0",attributes=attrdict)
# you can then add the entity into the provenance container
examplegraph.add(e0)

# define the attributes for the next entity
lit0 = PROVLiteral("2011-11-16T16:06:00",xsd["dateTime"])
attrdict ={prov["type"]: ex["File"], ex["path"]: "/shared/crime.txt", dcterms["creator"]: FOAF['Alice'], ex["content"]: "", dcterms["create"]: lit0}
# create the entity, note this time we give the id as a PROVURIRef
e1 = Entity(FOAF['Foo'],attributes=attrdict)
examplegraph.add(e1)

# add activities
# You can give the attributes during the creation if there are not many
a0 = Activity(id=None,starttime=datetime.datetime(2008, 7, 6, 5, 4, 3),attributes={prov["recipeLink"]: ex["create-file"]})
examplegraph.add(a0)

# You can have the JSON of the container with the to_provJSON() function
# as container is also a provenance record
print json.dumps(examplegraph.to_provJSON(),indent=4)
Add an example file demonstrating defining, adding and serializing prov elements in a prov container
484f3537d634e31f79c2281cff869724707ee2c3
day03/solution.py
day03/solution.py
santaPosition = [0, 0]
roboSantaPosition = [0, 0]

uniquePositions = set()

input = open("data", "r").read()

for index, char in enumerate(input):
    position = []
    if index % 2 == 0:
        position = santaPosition
    else:
        position = roboSantaPosition

    if char is '^':
        position[0] += 1
    elif char is 'v':
        position[0] -= 1
    elif char is '>':
        position[1] += 1
    elif char is '<':
        position[1] -= 1

    uniquePositions.add((position[0], position[1]))

print "Happy Houses:", len(uniquePositions)
santaPosition = [0, 0]
roboSantaPosition = [0, 0]

uniquePositions = set()

input = open("data", "r").read()

for index, char in enumerate(input):
    position = []
    if index % 2 == 0:
        position = santaPosition
    else:
        position = roboSantaPosition

    if char is '^':
        position[0] += 1
    elif char is 'v':
        position[0] -= 1
    elif char is '>':
        position[1] += 1
    elif char is '<':
        position[1] -= 1

    uniquePositions.add(tuple(position))

print "Happy Houses:", len(uniquePositions)
Make tuple creation from position cleaner.
Make tuple creation from position cleaner.
Python
mit
Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015,Mark-Simulacrum/advent-of-code-2015
<REPLACE_OLD> 1

    uniquePositions.add((position[0], position[1]))

print <REPLACE_NEW> 1

    uniquePositions.add(tuple(position))

print <REPLACE_END>
<REPLACE_OLD> len(uniquePositions) <REPLACE_NEW> len(uniquePositions) <REPLACE_END>
<|endoftext|>
santaPosition = [0, 0]
roboSantaPosition = [0, 0]

uniquePositions = set()

input = open("data", "r").read()

for index, char in enumerate(input):
    position = []
    if index % 2 == 0:
        position = santaPosition
    else:
        position = roboSantaPosition

    if char is '^':
        position[0] += 1
    elif char is 'v':
        position[0] -= 1
    elif char is '>':
        position[1] += 1
    elif char is '<':
        position[1] -= 1

    uniquePositions.add(tuple(position))

print "Happy Houses:", len(uniquePositions)
Make tuple creation from position cleaner.
santaPosition = [0, 0]
roboSantaPosition = [0, 0]

uniquePositions = set()

input = open("data", "r").read()

for index, char in enumerate(input):
    position = []
    if index % 2 == 0:
        position = santaPosition
    else:
        position = roboSantaPosition

    if char is '^':
        position[0] += 1
    elif char is 'v':
        position[0] -= 1
    elif char is '>':
        position[1] += 1
    elif char is '<':
        position[1] -= 1

    uniquePositions.add((position[0], position[1]))

print "Happy Houses:", len(uniquePositions)
ce2f07e7fa5ac38235cbb6ea6c4fee3a60031246
social_core/tests/backends/test_udata.py
social_core/tests/backends/test_udata.py
import json

from six.moves.urllib_parse import urlencode

from .oauth import OAuth2Test


class DatagouvfrOAuth2Test(OAuth2Test):
    backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
    user_data_url = 'https://www.data.gouv.fr/api/1/me/'
    expected_username = 'foobar'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    user_data_body = json.dumps({})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
import json

from six.moves.urllib_parse import urlencode

from .oauth import OAuth2Test


class DatagouvfrOAuth2Test(OAuth2Test):
    backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
    user_data_url = 'https://www.data.gouv.fr/api/1/me/'
    expected_username = 'foobar'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer',
        'first_name': 'foobar',
        'email': 'foobar@example.com'
    })
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    user_data_body = json.dumps({})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
Fix tests for udata/datagouvfr backend
Fix tests for udata/datagouvfr backend
Python
bsd-3-clause
python-social-auth/social-core,python-social-auth/social-core
<REPLACE_OLD> 'bearer' <REPLACE_NEW> 'bearer',
        'first_name': 'foobar',
        'email': 'foobar@example.com' <REPLACE_END>
<|endoftext|>
import json

from six.moves.urllib_parse import urlencode

from .oauth import OAuth2Test


class DatagouvfrOAuth2Test(OAuth2Test):
    backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
    user_data_url = 'https://www.data.gouv.fr/api/1/me/'
    expected_username = 'foobar'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer',
        'first_name': 'foobar',
        'email': 'foobar@example.com'
    })
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    user_data_body = json.dumps({})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
Fix tests for udata/datagouvfr backend
import json

from six.moves.urllib_parse import urlencode

from .oauth import OAuth2Test


class DatagouvfrOAuth2Test(OAuth2Test):
    backend_path = 'social_core.backends.udata.DatagouvfrOAuth2'
    user_data_url = 'https://www.data.gouv.fr/api/1/me/'
    expected_username = 'foobar'
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    user_data_body = json.dumps({})

    def test_login(self):
        self.do_login()

    def test_partial_pipeline(self):
        self.do_partial_pipeline()
27aad0e3ed95cb43b28eb3c02fa96b3a9b74de5b
tests/test_container.py
tests/test_container.py
# coding: utf8

from .common import *


class TestContainers(TestCase):

    def test_unicode_filename(self):
        container = av.open(self.sandboxed(u'¢∞§¶•ªº.mov'), 'w')
# coding: utf8

import os
import sys
import unittest

from .common import *


# On Windows, Python 3.0 - 3.5 have issues handling unicode filenames.
# Starting with Python 3.6 the situation is saner thanks to PEP 529:
#
# https://www.python.org/dev/peps/pep-0529/
broken_unicode = (
    os.name == 'nt' and
    sys.version_info >= (3, 0) and
    sys.version_info < (3, 6))


class TestContainers(TestCase):

    @unittest.skipIf(broken_unicode, 'Unicode filename handling is broken')
    def test_unicode_filename(self):
        container = av.open(self.sandboxed(u'¢∞§¶•ªº.mov'), 'w')
Disable unicode filename test on Windows with Python 3.0 - 3.5
Disable unicode filename test on Windows with Python 3.0 - 3.5

Before PEP 529 landed in Python 3.6, unicode filename handling on Windows is hit-and-miss, so don't break CI.
Python
bsd-3-clause
PyAV-Org/PyAV,mikeboers/PyAV,PyAV-Org/PyAV,mikeboers/PyAV
<REPLACE_OLD> utf8

from <REPLACE_NEW> utf8

import os
import sys
import unittest

from <REPLACE_END>
<REPLACE_OLD> *


class <REPLACE_NEW> *


# On Windows, Python 3.0 - 3.5 have issues handling unicode filenames.
# Starting with Python 3.6 the situation is saner thanks to PEP 529:
#
# https://www.python.org/dev/peps/pep-0529/
broken_unicode = (
    os.name == 'nt' and
    sys.version_info >= (3, 0) and
    sys.version_info < (3, 6))


class <REPLACE_END>
<INSERT> @unittest.skipIf(broken_unicode, 'Unicode filename handling is broken')
    <INSERT_END>
<|endoftext|>
# coding: utf8

import os
import sys
import unittest

from .common import *


# On Windows, Python 3.0 - 3.5 have issues handling unicode filenames.
# Starting with Python 3.6 the situation is saner thanks to PEP 529:
#
# https://www.python.org/dev/peps/pep-0529/
broken_unicode = (
    os.name == 'nt' and
    sys.version_info >= (3, 0) and
    sys.version_info < (3, 6))


class TestContainers(TestCase):

    @unittest.skipIf(broken_unicode, 'Unicode filename handling is broken')
    def test_unicode_filename(self):
        container = av.open(self.sandboxed(u'¢∞§¶•ªº.mov'), 'w')
Disable unicode filename test on Windows with Python 3.0 - 3.5

Before PEP 529 landed in Python 3.6, unicode filename handling on Windows is hit-and-miss, so don't break CI.

# coding: utf8

from .common import *


class TestContainers(TestCase):

    def test_unicode_filename(self):
        container = av.open(self.sandboxed(u'¢∞§¶•ªº.mov'), 'w')
c53f03c738ad6357ccd87a506cbc05bc1e2a8474
views.py
views.py
from django.http import HttpResponse
from django.shortcuts import render_to_response
from store.models import FandomHierarchy

def frontpage(request, filter):
    return render_to_response('index.html', {'filter': filter, 'nodes': FandomHierarchy.objects.all()})
from django.http import HttpResponse
from django.shortcuts import render_to_response
from store.models import FandomHierarchy

def frontpage(request, filter=None):
    return render_to_response('index.html', {'filter': filter, 'nodes': FandomHierarchy.objects.all()})
Add default value for filter so things don't break
Add default value for filter so things don't break
Python
bsd-3-clause
willmurnane/store
<REPLACE_OLD> filter):
    return <REPLACE_NEW> filter=None):
    return <REPLACE_END>
<|endoftext|>
from django.http import HttpResponse
from django.shortcuts import render_to_response
from store.models import FandomHierarchy

def frontpage(request, filter=None):
    return render_to_response('index.html', {'filter': filter, 'nodes': FandomHierarchy.objects.all()})
Add default value for filter so things don't break
from django.http import HttpResponse
from django.shortcuts import render_to_response
from store.models import FandomHierarchy

def frontpage(request, filter):
    return render_to_response('index.html', {'filter': filter, 'nodes': FandomHierarchy.objects.all()})
cd4d67ae0796e45ef699e1bab60ee5aeeac91dbb
native_qwebview_example/run.py
native_qwebview_example/run.py
import sys
from browser import BrowserDialog
from PyQt4 import QtGui
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebView


class MyBrowser(QtGui.QDialog):

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        QWebView.__init__(self)
        self.ui = BrowserDialog()
        self.ui.setupUi(self)
        self.ui.lineEdit.returnPressed.connect(self.loadURL)

    def loadURL(self):
        url = self.ui.lineEdit.text()
        self.ui.qwebview.load(QUrl(url))
        self.show()
        # self.ui.lineEdit.setText("")


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    myapp = MyBrowser()
    myapp.ui.qwebview.load(QUrl('http://localhost:8800/taxtweb'))
    myapp.show()
    sys.exit(app.exec_())
# Basic example for testing purposes, taken from
# https://pythonspot.com/creating-a-webbrowser-with-python-and-pyqt-tutorial/

import sys
from browser import BrowserDialog
from PyQt4 import QtGui
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebView


class MyBrowser(QtGui.QDialog):

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        QWebView.__init__(self)
        self.ui = BrowserDialog()
        self.ui.setupUi(self)
        self.ui.lineEdit.returnPressed.connect(self.loadURL)

    def loadURL(self):
        url = self.ui.lineEdit.text()
        self.ui.qwebview.load(QUrl(url))
        self.show()
        # self.ui.lineEdit.setText("")


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    myapp = MyBrowser()
    myapp.ui.qwebview.load(QUrl('http://localhost:8800/taxtweb'))
    myapp.show()
    sys.exit(app.exec_())
Add a comment about where the basic example was taken [skip CI]
Add a comment about where the basic example was taken [skip CI]
Python
agpl-3.0
gem/oq-svir-qgis,gem/oq-svir-qgis,gem/oq-svir-qgis,gem/oq-svir-qgis
<REPLACE_OLD> import <REPLACE_NEW> # Basic example for testing purposes, taken from
# https://pythonspot.com/creating-a-webbrowser-with-python-and-pyqt-tutorial/

import <REPLACE_END>
<|endoftext|>
# Basic example for testing purposes, taken from
# https://pythonspot.com/creating-a-webbrowser-with-python-and-pyqt-tutorial/

import sys
from browser import BrowserDialog
from PyQt4 import QtGui
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebView


class MyBrowser(QtGui.QDialog):

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        QWebView.__init__(self)
        self.ui = BrowserDialog()
        self.ui.setupUi(self)
        self.ui.lineEdit.returnPressed.connect(self.loadURL)

    def loadURL(self):
        url = self.ui.lineEdit.text()
        self.ui.qwebview.load(QUrl(url))
        self.show()
        # self.ui.lineEdit.setText("")


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    myapp = MyBrowser()
    myapp.ui.qwebview.load(QUrl('http://localhost:8800/taxtweb'))
    myapp.show()
    sys.exit(app.exec_())
Add a comment about where the basic example was taken [skip CI]
import sys
from browser import BrowserDialog
from PyQt4 import QtGui
from PyQt4.QtCore import QUrl
from PyQt4.QtWebKit import QWebView


class MyBrowser(QtGui.QDialog):

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        QWebView.__init__(self)
        self.ui = BrowserDialog()
        self.ui.setupUi(self)
        self.ui.lineEdit.returnPressed.connect(self.loadURL)

    def loadURL(self):
        url = self.ui.lineEdit.text()
        self.ui.qwebview.load(QUrl(url))
        self.show()
        # self.ui.lineEdit.setText("")


if __name__ == "__main__":
    app = QtGui.QApplication(sys.argv)
    myapp = MyBrowser()
    myapp.ui.qwebview.load(QUrl('http://localhost:8800/taxtweb'))
    myapp.show()
    sys.exit(app.exec_())
b26d8903561809bec0224cd0ebec5c3c6bff7ee5
setup.py
setup.py
from setuptools import setup, find_packages

requires = [
    'configparser',
    'python-dateutil',
    'pytz',
    'requests',
    'simplejson'
]

setup(
    name='amaascore',
    version='0.1.7',
    description='Asset Management as a Service - Core SDK',
    license='Apache License 2.0',
    url='https://github.com/amaas-fintech/amaas-core-sdk-python',
    author='AMaaS',
    author_email='tech@amaas.com',
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    packages=find_packages(exclude=['tests']),  # Very annoying that this doesnt work - I have to include a MANIFEST
    install_requires=requires,
)
from setuptools import setup, find_packages

requires = [
    'configparser',
    'python-dateutil',
    'pytz',
    'requests',
    'simplejson'
]

setup(
    name='amaascore',
    version='0.1.8',
    description='Asset Management as a Service - Core SDK',
    license='Apache License 2.0',
    url='https://github.com/amaas-fintech/amaas-core-sdk-python',
    author='AMaaS',
    author_email='tech@amaas.com',
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    packages=find_packages(exclude=['tests']),  # Very annoying that this doesnt work - I have to include a MANIFEST
    install_requires=requires,
)
Increment version for deployment to PyPI
Increment version for deployment to PyPI
Python
apache-2.0
paul-rs/amaas-core-sdk-python,amaas-fintech/amaas-core-sdk-python,nedlowe/amaas-core-sdk-python,nedlowe/amaas-core-sdk-python,paul-rs/amaas-core-sdk-python,amaas-fintech/amaas-core-sdk-python
<REPLACE_OLD> version='0.1.7', <REPLACE_NEW> version='0.1.8', <REPLACE_END>
<|endoftext|>
from setuptools import setup, find_packages

requires = [
    'configparser',
    'python-dateutil',
    'pytz',
    'requests',
    'simplejson'
]

setup(
    name='amaascore',
    version='0.1.8',
    description='Asset Management as a Service - Core SDK',
    license='Apache License 2.0',
    url='https://github.com/amaas-fintech/amaas-core-sdk-python',
    author='AMaaS',
    author_email='tech@amaas.com',
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    packages=find_packages(exclude=['tests']),  # Very annoying that this doesnt work - I have to include a MANIFEST
    install_requires=requires,
)
Increment version for deployment to PyPI
from setuptools import setup, find_packages

requires = [
    'configparser',
    'python-dateutil',
    'pytz',
    'requests',
    'simplejson'
]

setup(
    name='amaascore',
    version='0.1.7',
    description='Asset Management as a Service - Core SDK',
    license='Apache License 2.0',
    url='https://github.com/amaas-fintech/amaas-core-sdk-python',
    author='AMaaS',
    author_email='tech@amaas.com',
    classifiers=[
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    packages=find_packages(exclude=['tests']),  # Very annoying that this doesnt work - I have to include a MANIFEST
    install_requires=requires,
)
2b0c7304c1372997bc226d255ef22cc31f56f6fa
caminae/core/management.py
caminae/core/management.py
# http://djangosnippets.org/snippets/2311/
# Ensure South will update our custom SQL during a call to `migrate`.

from south.signals import post_migrate


def run_initial_sql(sender, **kwargs):
    app_label = kwargs.get('app')
    import os
    from django.db import connection, transaction, models
    app_dir = os.path.normpath(os.path.join(os.path.dirname(
        models.get_app(app_label).__file__), 'sql'))
    backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
    sql_files = [os.path.join(app_dir, "%s.%s.sql" % (app_label, backend_name)),
                 os.path.join(app_dir, "%s.sql" % app_label)]
    cursor = connection.cursor()
    for sql_file in sql_files:
        try:
            if os.path.exists(sql_file):
                print "Loading initial SQL data from '%s'" % sql_file
                f = open(sql_file)
                sql = f.read()
                f.close()
                cursor.execute(sql)
        except Exception, e:
            import sys
            sys.stderr.write("Failed to install custom SQL file '%s': %s\n" % (sql_file, e))
            import traceback
            traceback.print_exc()
            transaction.rollback_unless_managed()
        else:
            transaction.commit_unless_managed()


post_migrate.connect(run_initial_sql)
Enable auto-loading of raw SQL during South migration
Enable auto-loading of raw SQL during South migration
Python
bsd-2-clause
makinacorpus/Geotrek,GeotrekCE/Geotrek-admin,GeotrekCE/Geotrek-admin,camillemonchicourt/Geotrek,GeotrekCE/Geotrek-admin,johan--/Geotrek,johan--/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek,mabhub/Geotrek,Anaethelion/Geotrek,camillemonchicourt/Geotrek,makinacorpus/Geotrek,Anaethelion/Geotrek,johan--/Geotrek,mabhub/Geotrek,johan--/Geotrek,GeotrekCE/Geotrek-admin,makinacorpus/Geotrek,Anaethelion/Geotrek,mabhub/Geotrek,camillemonchicourt/Geotrek,makinacorpus/Geotrek
<REPLACE_OLD> <REPLACE_NEW> # http://djangosnippets.org/snippets/2311/
# Ensure South will update our custom SQL during a call to `migrate`.

from south.signals import post_migrate


def run_initial_sql(sender, **kwargs):
    app_label = kwargs.get('app')
    import os
    from django.db import connection, transaction, models
    app_dir = os.path.normpath(os.path.join(os.path.dirname(
        models.get_app(app_label).__file__), 'sql'))
    backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
    sql_files = [os.path.join(app_dir, "%s.%s.sql" % (app_label, backend_name)),
                 os.path.join(app_dir, "%s.sql" % app_label)]
    cursor = connection.cursor()
    for sql_file in sql_files:
        try:
            if os.path.exists(sql_file):
                print "Loading initial SQL data from '%s'" % sql_file
                f = open(sql_file)
                sql = f.read()
                f.close()
                cursor.execute(sql)
        except Exception, e:
            import sys
            sys.stderr.write("Failed to install custom SQL file '%s': %s\n" % (sql_file, e))
            import traceback
            traceback.print_exc()
            transaction.rollback_unless_managed()
        else:
            transaction.commit_unless_managed()


post_migrate.connect(run_initial_sql) <REPLACE_END>
<|endoftext|>
# http://djangosnippets.org/snippets/2311/
# Ensure South will update our custom SQL during a call to `migrate`.

from south.signals import post_migrate


def run_initial_sql(sender, **kwargs):
    app_label = kwargs.get('app')
    import os
    from django.db import connection, transaction, models
    app_dir = os.path.normpath(os.path.join(os.path.dirname(
        models.get_app(app_label).__file__), 'sql'))
    backend_name = connection.settings_dict['ENGINE'].split('.')[-1]
    sql_files = [os.path.join(app_dir, "%s.%s.sql" % (app_label, backend_name)),
                 os.path.join(app_dir, "%s.sql" % app_label)]
    cursor = connection.cursor()
    for sql_file in sql_files:
        try:
            if os.path.exists(sql_file):
                print "Loading initial SQL data from '%s'" % sql_file
                f = open(sql_file)
                sql = f.read()
                f.close()
                cursor.execute(sql)
        except Exception, e:
            import sys
            sys.stderr.write("Failed to install custom SQL file '%s': %s\n" % (sql_file, e))
            import traceback
            traceback.print_exc()
            transaction.rollback_unless_managed()
        else:
            transaction.commit_unless_managed()


post_migrate.connect(run_initial_sql)
Enable auto-loading of raw SQL during South migration
708df747d1fba202780e97e1b1eb1af024f26f72
setup.py
setup.py
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function

from setuptools import setup

setup(
    include_package_data=True,
    test_suite="nose.collector",
    tests_require="nose",
    setup_requires=['pbr'],
    pbr=True,
)
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function

from setuptools import setup

setup(
    include_package_data=True,
    test_suite="nose.collector",
    tests_require=['nose', 'hocr-spec'],
    setup_requires=['pbr'],
    pbr=True,
)
Add hocr-spec-python to test requirements
Add hocr-spec-python to test requirements
Python
apache-2.0
mittagessen/kraken,mittagessen/kraken,mittagessen/kraken,mittagessen/kraken
<REPLACE_OLD> tests_require="nose", <REPLACE_NEW> tests_require=['nose', 'hocr-spec'], <REPLACE_END>
<|endoftext|>
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function

from setuptools import setup

setup(
    include_package_data=True,
    test_suite="nose.collector",
    tests_require=['nose', 'hocr-spec'],
    setup_requires=['pbr'],
    pbr=True,
)
Add hocr-spec-python to test requirements
#!/usr/bin/env python
from __future__ import absolute_import, division, print_function

from setuptools import setup

setup(
    include_package_data=True,
    test_suite="nose.collector",
    tests_require="nose",
    setup_requires=['pbr'],
    pbr=True,
)
62586dc0e4e9ca8d0fee6c72e296c74875f3a65c
api/swd6/api/app.py
api/swd6/api/app.py
import logging
import os

import flask
import flask_cors
from sqlalchemy_jsonapi import flaskext as flask_jsonapi

from swd6 import config
from swd6.db.models import db

CONF = config.CONF

DEFAULT_CONF_PATH = '/opt/swd6/api/api.conf'

app = None


def start():
    # pylint: disable=global-statement
    global app
    app = flask.Flask(__name__)
    app.config['DEBUG'] = True
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    app.config['SQLALCHEMY_DATABASE_URI'] = CONF.db.uri
    app.config['SERVER_NAME'] = CONF.api.host
    app.logger.setLevel(logging.DEBUG)

    flask_cors.CORS(app, origins=CONF.api.cors_hosts)
    logging.getLogger('flask_cors').level = logging.DEBUG

    db.init_app(app)
    flask_jsonapi.FlaskJSONAPI(app, db, options={'dasherize': False, 'include_fk_columns': True})
    return app


logging.basicConfig(level=logging.DEBUG)
if os.path.exists(DEFAULT_CONF_PATH):
    config_files = [DEFAULT_CONF_PATH]
else:
    config_files = []
config.load([], default_config_files=config_files)
start()
import logging
import os

import flask
import flask_cors
from sqlalchemy_jsonapi import flaskext as flask_jsonapi

from swd6 import config
from swd6.db.models import db

CONF = config.CONF

DEFAULT_CONF_PATH = '/opt/swd6/api/api.conf'

app = None


def start():
    # pylint: disable=global-statement
    global app
    app = flask.Flask(__name__)
    app.config['DEBUG'] = True
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    app.config['SQLALCHEMY_DATABASE_URI'] = CONF.db.uri
    app.config['SERVER_NAME'] = CONF.api.host
    app.logger.setLevel(logging.DEBUG)

    flask_cors.CORS(app, origins=CONF.api.cors_hosts, supports_credentials=True)
    logging.getLogger('flask_cors').level = logging.DEBUG

    db.init_app(app)
    flask_jsonapi.FlaskJSONAPI(app, db, options={'dasherize': False, 'include_fk_columns': True})
    return app


logging.basicConfig(level=logging.DEBUG)
if os.path.exists(DEFAULT_CONF_PATH):
    config_files = [DEFAULT_CONF_PATH]
else:
    config_files = []
config.load([], default_config_files=config_files)
start()
Fix CORS to allow for credentials
Fix CORS to allow for credentials Something changed in the client code requiring this setting.
Python
apache-2.0
jimbobhickville/swd6,jimbobhickville/swd6,jimbobhickville/swd6
<REPLACE_OLD> origins=CONF.api.cors_hosts) <REPLACE_NEW> origins=CONF.api.cors_hosts, supports_credentials=True) <REPLACE_END>
<|endoftext|>
import logging
import os

import flask
import flask_cors
from sqlalchemy_jsonapi import flaskext as flask_jsonapi

from swd6 import config
from swd6.db.models import db

CONF = config.CONF

DEFAULT_CONF_PATH = '/opt/swd6/api/api.conf'

app = None


def start():
    # pylint: disable=global-statement
    global app
    app = flask.Flask(__name__)
    app.config['DEBUG'] = True
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    app.config['SQLALCHEMY_DATABASE_URI'] = CONF.db.uri
    app.config['SERVER_NAME'] = CONF.api.host
    app.logger.setLevel(logging.DEBUG)

    flask_cors.CORS(app, origins=CONF.api.cors_hosts, supports_credentials=True)
    logging.getLogger('flask_cors').level = logging.DEBUG

    db.init_app(app)
    flask_jsonapi.FlaskJSONAPI(app, db, options={'dasherize': False, 'include_fk_columns': True})
    return app


logging.basicConfig(level=logging.DEBUG)
if os.path.exists(DEFAULT_CONF_PATH):
    config_files = [DEFAULT_CONF_PATH]
else:
    config_files = []
config.load([], default_config_files=config_files)
start()
Fix CORS to allow for credentials

Something changed in the client code requiring this setting.

import logging
import os

import flask
import flask_cors
from sqlalchemy_jsonapi import flaskext as flask_jsonapi

from swd6 import config
from swd6.db.models import db

CONF = config.CONF

DEFAULT_CONF_PATH = '/opt/swd6/api/api.conf'

app = None


def start():
    # pylint: disable=global-statement
    global app
    app = flask.Flask(__name__)
    app.config['DEBUG'] = True
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
    app.config['SQLALCHEMY_DATABASE_URI'] = CONF.db.uri
    app.config['SERVER_NAME'] = CONF.api.host
    app.logger.setLevel(logging.DEBUG)

    flask_cors.CORS(app, origins=CONF.api.cors_hosts)
    logging.getLogger('flask_cors').level = logging.DEBUG

    db.init_app(app)
    flask_jsonapi.FlaskJSONAPI(app, db, options={'dasherize': False, 'include_fk_columns': True})
    return app


logging.basicConfig(level=logging.DEBUG)
if os.path.exists(DEFAULT_CONF_PATH):
    config_files = [DEFAULT_CONF_PATH]
else:
    config_files = []
config.load([], default_config_files=config_files)
start()
ffdfa7729c865c8e999bdfd4af49e3beb6899970
test.py
test.py
import pdb
import time
import collections
import random
import itertools
import xmlrpclib
import dis

server_url = 'http://127.0.0.1:20738/RPC2'
server = xmlrpclib.Server(server_url)
G = server.ubigraph

def erdos ():
    vert_ids = range(0,1000)
    p = 0.001
    G.clear()
    pdb.set_trace()
    for id in vert_ids:
        G.new_vertex_w_id(id)
    edge_count = 0
    for i,j in itertools.combinations(vert_ids, 2):
        r = random.random()
        if r <= p:
            edge_count += 1
            id = G.new_edge(i, j)
            #G.set_edge_attribute(id, 'oriented', 'true')
            #G.set_edge_attribute(id, 'arrow', 'true')
            #G.set_edge_attribute(id, 'showstrain', 'true')
            #G.set_edge_attribute(id, 'strength', '0.0')
    print edge_count

def groupbycount (ids):
    return [len(list(li[1])) for li in itertools.groupby(sorted(ids))]

def preferential ():
    G.clear()
    size = 1000
    G.new_vertex_w_id(0)
    ids = [0]
    for i in range(1, size):
        G.new_vertex_w_id(i)
        j = random.choice(ids)
        G.new_edge(i,j)
        ids += [i,j]
    hist = groupbycount(ids)
    histprime = collections.defaultdict(lambda:0)
    for i in hist:
        histprime[i] += 1
    print sorted([(k,v) for k,v in histprime.items()])

if __name__ == '__main__':
    #preferential()
    erdos()
Add code to generate Erdos and preferential attachment graphs
Add code to generate Erdos and preferential attachment graphs
Python
mit
hausdorff/python-api-breakin
<REPLACE_OLD> <REPLACE_NEW> import pdb
import time
import collections
import random
import itertools
import xmlrpclib
import dis

server_url = 'http://127.0.0.1:20738/RPC2'
server = xmlrpclib.Server(server_url)
G = server.ubigraph

def erdos ():
    vert_ids = range(0,1000)
    p = 0.001
    G.clear()
    pdb.set_trace()
    for id in vert_ids:
        G.new_vertex_w_id(id)
    edge_count = 0
    for i,j in itertools.combinations(vert_ids, 2):
        r = random.random()
        if r <= p:
            edge_count += 1
            id = G.new_edge(i, j)
            #G.set_edge_attribute(id, 'oriented', 'true')
            #G.set_edge_attribute(id, 'arrow', 'true')
            #G.set_edge_attribute(id, 'showstrain', 'true')
            #G.set_edge_attribute(id, 'strength', '0.0')
    print edge_count

def groupbycount (ids):
    return [len(list(li[1])) for li in itertools.groupby(sorted(ids))]

def preferential ():
    G.clear()
    size = 1000
    G.new_vertex_w_id(0)
    ids = [0]
    for i in range(1, size):
        G.new_vertex_w_id(i)
        j = random.choice(ids)
        G.new_edge(i,j)
        ids += [i,j]
    hist = groupbycount(ids)
    histprime = collections.defaultdict(lambda:0)
    for i in hist:
        histprime[i] += 1
    print sorted([(k,v) for k,v in histprime.items()])

if __name__ == '__main__':
    #preferential()
    erdos() <REPLACE_END>
<|endoftext|>
import pdb
import time
import collections
import random
import itertools
import xmlrpclib
import dis

server_url = 'http://127.0.0.1:20738/RPC2'
server = xmlrpclib.Server(server_url)
G = server.ubigraph

def erdos ():
    vert_ids = range(0,1000)
    p = 0.001
    G.clear()
    pdb.set_trace()
    for id in vert_ids:
        G.new_vertex_w_id(id)
    edge_count = 0
    for i,j in itertools.combinations(vert_ids, 2):
        r = random.random()
        if r <= p:
            edge_count += 1
            id = G.new_edge(i, j)
            #G.set_edge_attribute(id, 'oriented', 'true')
            #G.set_edge_attribute(id, 'arrow', 'true')
            #G.set_edge_attribute(id, 'showstrain', 'true')
            #G.set_edge_attribute(id, 'strength', '0.0')
    print edge_count

def groupbycount (ids):
    return [len(list(li[1])) for li in itertools.groupby(sorted(ids))]

def preferential ():
    G.clear()
    size = 1000
    G.new_vertex_w_id(0)
    ids = [0]
    for i in range(1, size):
        G.new_vertex_w_id(i)
        j = random.choice(ids)
        G.new_edge(i,j)
        ids += [i,j]
    hist = groupbycount(ids)
    histprime = collections.defaultdict(lambda:0)
    for i in hist:
        histprime[i] += 1
    print sorted([(k,v) for k,v in histprime.items()])

if __name__ == '__main__':
    #preferential()
    erdos()
Add code to generate Erdos and preferential attachment graphs
442aa916dc7b6d199b2c5e1fe973aa3fed8e9c35
src/python/grpcio_tests/tests_aio/unit/init_test.py
src/python/grpcio_tests/tests_aio/unit/init_test.py
# Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import unittest

from tests_aio.unit._test_base import AioTestBase


class TestInit(AioTestBase):

    async def test_grpc(self):
        import grpc  # pylint: disable=wrong-import-position
        channel = grpc.aio.insecure_channel('dummy')
        self.assertIsInstance(channel, grpc.aio.Channel)

    async def test_grpc_dot_aio(self):
        import grpc.aio  # pylint: disable=wrong-import-position
        channel = grpc.aio.insecure_channel('dummy')
        self.assertIsInstance(channel, grpc.aio.Channel)

    async def test_aio_from_grpc(self):
        from grpc import aio  # pylint: disable=wrong-import-position
        channel = aio.insecure_channel('dummy')
        self.assertIsInstance(channel, aio.Channel)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)
# Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import unittest

from tests_aio.unit._test_base import AioTestBase


class TestInit(AioTestBase):

    async def test_grpc(self):
        import grpc  # pylint: disable=wrong-import-position
        channel = grpc.aio.insecure_channel('dummy')
        self.assertIsInstance(channel, grpc.aio.Channel)

    async def test_grpc_dot_aio(self):
        import grpc.aio  # pylint: disable=wrong-import-position
        channel = grpc.aio.insecure_channel('dummy')
        self.assertIsInstance(channel, grpc.aio.Channel)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)
Make sure the module space won't be polluted by "from grpc import aio"
Make sure the module space won't be polluted by "from grpc import aio"
Python
apache-2.0
jtattermusch/grpc,donnadionne/grpc,nicolasnoble/grpc,grpc/grpc,ejona86/grpc,donnadionne/grpc,ctiller/grpc,jtattermusch/grpc,nicolasnoble/grpc,donnadionne/grpc,vjpai/grpc,grpc/grpc,grpc/grpc,ctiller/grpc,vjpai/grpc,jtattermusch/grpc,donnadionne/grpc,grpc/grpc,ejona86/grpc,stanley-cheung/grpc,stanley-cheung/grpc,nicolasnoble/grpc,vjpai/grpc,vjpai/grpc,stanley-cheung/grpc,ctiller/grpc,stanley-cheung/grpc,vjpai/grpc,ejona86/grpc,stanley-cheung/grpc,ejona86/grpc,jtattermusch/grpc,ctiller/grpc,grpc/grpc,vjpai/grpc,ctiller/grpc,grpc/grpc,donnadionne/grpc,vjpai/grpc,ejona86/grpc,jtattermusch/grpc,jtattermusch/grpc,jtattermusch/grpc,vjpai/grpc,vjpai/grpc,jtattermusch/grpc,donnadionne/grpc,nicolasnoble/grpc,nicolasnoble/grpc,jtattermusch/grpc,ctiller/grpc,stanley-cheung/grpc,donnadionne/grpc,jtattermusch/grpc,donnadionne/grpc,nicolasnoble/grpc,nicolasnoble/grpc,jtattermusch/grpc,ctiller/grpc,donnadionne/grpc,grpc/grpc,grpc/grpc,ctiller/grpc,stanley-cheung/grpc,ctiller/grpc,nicolasnoble/grpc,donnadionne/grpc,stanley-cheung/grpc,ejona86/grpc,stanley-cheung/grpc,donnadionne/grpc,jtattermusch/grpc,ejona86/grpc,stanley-cheung/grpc,donnadionne/grpc,ctiller/grpc,ejona86/grpc,ctiller/grpc,stanley-cheung/grpc,nicolasnoble/grpc,ejona86/grpc,vjpai/grpc,stanley-cheung/grpc,grpc/grpc,grpc/grpc,grpc/grpc,vjpai/grpc,nicolasnoble/grpc,nicolasnoble/grpc,nicolasnoble/grpc,ejona86/grpc,ctiller/grpc,ejona86/grpc,ejona86/grpc,grpc/grpc,vjpai/grpc
<REPLACE_OLD> grpc.aio.Channel)

    async def test_aio_from_grpc(self):
        from grpc import aio  # pylint: disable=wrong-import-position
        channel = aio.insecure_channel('dummy')
        self.assertIsInstance(channel, aio.Channel)


if <REPLACE_NEW> grpc.aio.Channel)


if <REPLACE_END>
<|endoftext|>
# Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import unittest

from tests_aio.unit._test_base import AioTestBase


class TestInit(AioTestBase):

    async def test_grpc(self):
        import grpc  # pylint: disable=wrong-import-position
        channel = grpc.aio.insecure_channel('dummy')
        self.assertIsInstance(channel, grpc.aio.Channel)

    async def test_grpc_dot_aio(self):
        import grpc.aio  # pylint: disable=wrong-import-position
        channel = grpc.aio.insecure_channel('dummy')
        self.assertIsInstance(channel, grpc.aio.Channel)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)
Make sure the module space won't be polluted by "from grpc import aio"
# Copyright 2019 The gRPC Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import logging
import unittest

from tests_aio.unit._test_base import AioTestBase


class TestInit(AioTestBase):

    async def test_grpc(self):
        import grpc  # pylint: disable=wrong-import-position
        channel = grpc.aio.insecure_channel('dummy')
        self.assertIsInstance(channel, grpc.aio.Channel)

    async def test_grpc_dot_aio(self):
        import grpc.aio  # pylint: disable=wrong-import-position
        channel = grpc.aio.insecure_channel('dummy')
        self.assertIsInstance(channel, grpc.aio.Channel)

    async def test_aio_from_grpc(self):
        from grpc import aio  # pylint: disable=wrong-import-position
        channel = aio.insecure_channel('dummy')
        self.assertIsInstance(channel, aio.Channel)


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    unittest.main(verbosity=2)
2fb3a72885d279f7a79e10f00d71991144748f1c
haas/plugins/base_plugin.py
haas/plugins/base_plugin.py
from haas.utils import uncamelcase

from .i_plugin import IPlugin


class BasePlugin(IPlugin):
    name = None
    enabled = False
    enabling_option = None

    def __init__(self, name=None):
        if name is None:
            name = uncamelcase(type(self).__name__, sep='-')
        self.name = name
        self.enabling_option = 'with_{0}'.format(name.replace('-', '_'))

    def add_parser_arguments(self, parser):
        parser.add_argument('--with-{0}'.format(self.name),
                            action='store_true',
                            dest=self.enabling_option)

    def configure(self, args):
        if getattr(args, self.enabling_option, False):
            self.enabled = True
from haas.utils import uncamelcase

from .i_plugin import IPlugin


class BasePlugin(IPlugin):
    name = None
    enabled = False
    enabling_option = None

    def __init__(self, name=None):
        if name is None:
            name = uncamelcase(type(self).__name__, sep='-')
        self.name = name
        self.enabling_option = 'with_{0}'.format(name.replace('-', '_'))

    def add_parser_arguments(self, parser):
        parser.add_argument('--with-{0}'.format(self.name),
                            action='store_true',
                            help='Enable the {0} plugin'.format(self.name),
                            dest=self.enabling_option)

    def configure(self, args):
        if getattr(args, self.enabling_option, False):
            self.enabled = True
Add help text for plugin enable option
Add help text for plugin enable option
Python
bsd-3-clause
sjagoe/haas,scalative/haas,sjagoe/haas,itziakos/haas,scalative/haas,itziakos/haas
<INSERT> help='Enable the {0} plugin'.format(self.name), <INSERT_END> <|endoftext|> from haas.utils import uncamelcase

from .i_plugin import IPlugin


class BasePlugin(IPlugin):
    name = None
    enabled = False
    enabling_option = None

    def __init__(self, name=None):
        if name is None:
            name = uncamelcase(type(self).__name__, sep='-')
        self.name = name
        self.enabling_option = 'with_{0}'.format(name.replace('-', '_'))

    def add_parser_arguments(self, parser):
        parser.add_argument('--with-{0}'.format(self.name),
                            action='store_true',
                            help='Enable the {0} plugin'.format(self.name),
                            dest=self.enabling_option)

    def configure(self, args):
        if getattr(args, self.enabling_option, False):
            self.enabled = True
Add help text for plugin enable option from haas.utils import uncamelcase

from .i_plugin import IPlugin


class BasePlugin(IPlugin):
    name = None
    enabled = False
    enabling_option = None

    def __init__(self, name=None):
        if name is None:
            name = uncamelcase(type(self).__name__, sep='-')
        self.name = name
        self.enabling_option = 'with_{0}'.format(name.replace('-', '_'))

    def add_parser_arguments(self, parser):
        parser.add_argument('--with-{0}'.format(self.name),
                            action='store_true',
                            dest=self.enabling_option)

    def configure(self, args):
        if getattr(args, self.enabling_option, False):
            self.enabled = True
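For context, a minimal standalone sketch of the argparse behaviour this diff configures; the plugin name 'coverage' and the parser below are invented for illustration and are not part of the haas codebase:

import argparse

# Invented plugin name; haas normally derives it by uncamelcasing the
# plugin class name, e.g. CoveragePlugin -> 'coverage-plugin'.
name = 'coverage'
enabling_option = 'with_{0}'.format(name.replace('-', '_'))

parser = argparse.ArgumentParser(prog='haas')
# Mirrors BasePlugin.add_parser_arguments after the change: the flag now
# shows a help string in --help output instead of appearing undocumented.
parser.add_argument('--with-{0}'.format(name),
                    action='store_true',
                    help='Enable the {0} plugin'.format(name),
                    dest=enabling_option)

args = parser.parse_args(['--with-coverage'])
print(getattr(args, enabling_option))  # True, so configure() would enable it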
70458f45f3419927271f51872252834f08ef13f2
workshopvenues/venues/tests.py
workshopvenues/venues/tests.py
""" This file demonstrates writing tests using the unittest module. These will pass when you run "manage.py test". Replace this with more appropriate tests for your application. """ from django.test import TestCase class SimpleTest(TestCase): def test_basic_addition(self): """ Tests that 1 + 1 always equals 2. """ self.assertEqual(1 + 1, 2)
""" This file demonstrates writing tests using the unittest module. These will pass when you run "manage.py test". Replace this with more appropriate tests for your application. """ from django.test import TestCase from .models import Address class ModelsTest(TestCase): def test_create_address(self): a = Address() a.street = '23, Test Street' a.town = 'London' a.postcode = 'xxxxx' a.country = 'UK' a.save() self.assertTrue(a.id >= 0)
Add Address model creation test case
Add Address model creation test case
Python
bsd-3-clause
andreagrandi/workshopvenues
<REPLACE_OLD> TestCase


class SimpleTest(TestCase): <REPLACE_NEW> TestCase

from .models import Address


class ModelsTest(TestCase): <REPLACE_END>
<REPLACE_OLD> test_basic_addition(self): <REPLACE_NEW> test_create_address(self): <REPLACE_END>
<REPLACE_OLD> """ <REPLACE_NEW> a = Address() <REPLACE_END>
<REPLACE_OLD> Tests that 1 + 1 always equals 2. <REPLACE_NEW> a.street = '23, Test Street' <REPLACE_END>
<REPLACE_OLD> """ <REPLACE_NEW> a.town = 'London' <REPLACE_END>
<REPLACE_OLD> self.assertEqual(1 + 1, 2) <REPLACE_NEW> a.postcode = 'xxxxx'
        a.country = 'UK'
        a.save()

        self.assertTrue(a.id >= 0) <REPLACE_END> <|endoftext|> """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".

Replace this with more appropriate tests for your application.
"""

from django.test import TestCase

from .models import Address


class ModelsTest(TestCase):
    def test_create_address(self):
        a = Address()
        a.street = '23, Test Street'
        a.town = 'London'
        a.postcode = 'xxxxx'
        a.country = 'UK'
        a.save()

        self.assertTrue(a.id >= 0)
Add Address model creation test case """
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".

Replace this with more appropriate tests for your application.
"""

from django.test import TestCase


class SimpleTest(TestCase):
    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        self.assertEqual(1 + 1, 2)
e8cfb78df42021097e54009c5b724c1d176822bc
sale_analytic_cost/__openerp__.py
sale_analytic_cost/__openerp__.py
# -*- coding: utf-8 -*-
# (c) 2015 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

{
    'name': 'Sale Analytic Cost',
    "version": "8.0.1.0.0",
    "license": 'AGPL-3',
    "author": 'AvanzOSC,'
              'Serv. Tecnol. Avanzados - Pedro M. Baeza',
    'website': "http://www.odoomrp.com",
    "contributors": [
        "Ainara Galdona <ainaragaldona@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <anajuaristi@avanzosc.es>",
    ],
    'category': 'Sales',
    'depends': ['sale',
                'account',
                'mrp_production_project_estimated_cost'],
    'data': [],
    'installable': True,
}
# -*- coding: utf-8 -*-
# (c) 2015 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

{
    'name': 'Sale Analytic Cost',
    "version": "8.0.1.0.0",
    "license": 'AGPL-3',
    "author": 'AvanzOSC,'
              'Serv. Tecnol. Avanzados - Pedro M. Baeza',
    'website': "http://www.odoomrp.com",
    "contributors": [
        "Ainara Galdona <ainaragaldona@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <anajuaristi@avanzosc.es>",
    ],
    'category': 'Sales',
    'depends': ['sale',
                'account',
                'mrp_production_estimated_cost'],
    'data': [],
    'installable': True,
}
Replace dependency "mrp_production_project_estimated_cost" with "mrp_production_estimated_cost".
[FIX] sale_analytic_cost: Replace dependency "mrp_production_project_estimated_cost" with "mrp_production_estimated_cost".
Python
agpl-3.0
Daniel-CA/odoo-addons,alfredoavanzosc/odoo-addons,mikelarre/hr-addons,Daniel-CA/odoo-addons,esthermm/odoo-addons,agaldona/odoo-addons,alfredoavanzosc/odoo-addons,esthermm/odoo-addons,agaldona/odoo-addons,agaldona/odoo-addons,esthermm/odoo-addons,Daniel-CA/odoo-addons
<REPLACE_OLD> 'mrp_production_project_estimated_cost'], <REPLACE_NEW> 'mrp_production_estimated_cost'], <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
# (c) 2015 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

{
    'name': 'Sale Analytic Cost',
    "version": "8.0.1.0.0",
    "license": 'AGPL-3',
    "author": 'AvanzOSC,'
              'Serv. Tecnol. Avanzados - Pedro M. Baeza',
    'website': "http://www.odoomrp.com",
    "contributors": [
        "Ainara Galdona <ainaragaldona@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <anajuaristi@avanzosc.es>",
    ],
    'category': 'Sales',
    'depends': ['sale',
                'account',
                'mrp_production_estimated_cost'],
    'data': [],
    'installable': True,
}
[FIX] sale_analytic_cost: Replace dependency "mrp_production_project_estimated_cost" with "mrp_production_estimated_cost". # -*- coding: utf-8 -*-
# (c) 2015 Ainara Galdona - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html

{
    'name': 'Sale Analytic Cost',
    "version": "8.0.1.0.0",
    "license": 'AGPL-3',
    "author": 'AvanzOSC,'
              'Serv. Tecnol. Avanzados - Pedro M. Baeza',
    'website': "http://www.odoomrp.com",
    "contributors": [
        "Ainara Galdona <ainaragaldona@avanzosc.es>",
        "Pedro M. Baeza <pedro.baeza@serviciosbaeza.com>",
        "Ana Juaristi <anajuaristi@avanzosc.es>",
    ],
    'category': 'Sales',
    'depends': ['sale',
                'account',
                'mrp_production_project_estimated_cost'],
    'data': [],
    'installable': True,
}
e733b0d5192437a95c4eafd1babc02385fb4fcf7
cms/sitemaps/cms_sitemap.py
cms/sitemaps/cms_sitemap.py
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import Sitemap
from django.utils import translation

from cms.models import Title


def from_iterable(iterables):
    """
    Backport of itertools.chain.from_iterable
    """
    for it in iterables:
        for element in it:
            yield element


class CMSSitemap(Sitemap):
    changefreq = "monthly"
    priority = 0.5

    def items(self):
        all_titles = Title.objects.public().filter(page__login_required=False)
        return all_titles

    def lastmod(self, title):
        modification_dates = [title.page.changed_date, title.page.publication_date]
        plugins_for_placeholder = lambda placeholder: placeholder.get_plugins()
        plugins = from_iterable(map(plugins_for_placeholder, title.page.placeholders.all()))
        plugin_modification_dates = map(lambda plugin: plugin.changed_date, plugins)
        modification_dates.extend(plugin_modification_dates)
        return max(modification_dates)

    def location(self, title):
        translation.activate(title.language)
        url = title.page.get_absolute_url(title.language)
        translation.deactivate()
        return url
# -*- coding: utf-8 -*-
from django.contrib.sitemaps import Sitemap
from django.db.models import Q
from django.utils import translation

from cms.models import Title


def from_iterable(iterables):
    """
    Backport of itertools.chain.from_iterable
    """
    for it in iterables:
        for element in it:
            yield element


class CMSSitemap(Sitemap):
    changefreq = "monthly"
    priority = 0.5

    def items(self):
        #
        # It is counter-productive to provide entries for:
        # > Pages which redirect:
        #   - If the page redirects to another page on this site, the
        #     destination page will already be in the sitemap, and
        #   - If the page redirects externally, then it shouldn't be
        #     part of our sitemap anyway.
        # > Pages which cannot be accessed by anonymous users (like
        #   search engines are).
        #
        all_titles = Title.objects.public().filter(
            Q(redirect='') | Q(redirect__isnull=True),
            page__login_required=False
        )
        return all_titles

    def lastmod(self, title):
        modification_dates = [title.page.changed_date, title.page.publication_date]
        plugins_for_placeholder = lambda placeholder: placeholder.get_plugins()
        plugins = from_iterable(map(plugins_for_placeholder, title.page.placeholders.all()))
        plugin_modification_dates = map(lambda plugin: plugin.changed_date, plugins)
        modification_dates.extend(plugin_modification_dates)
        return max(modification_dates)

    def location(self, title):
        translation.activate(title.language)
        url = title.page.get_absolute_url(title.language)
        translation.deactivate()
        return url
Remove redirected pages from the sitemap
Remove redirected pages from the sitemap
Python
bsd-3-clause
ScholzVolkmer/django-cms,wyg3958/django-cms,donce/django-cms,robmagee/django-cms,DylannCordel/django-cms,frnhr/django-cms,jrief/django-cms,wuzhihui1123/django-cms,Livefyre/django-cms,dhorelik/django-cms,netzkolchose/django-cms,intip/django-cms,chkir/django-cms,jproffitt/django-cms,selecsosi/django-cms,czpython/django-cms,liuyisiyisi/django-cms,takeshineshiro/django-cms,nostalgiaz/django-cms,SmithsonianEnterprises/django-cms,sephii/django-cms,jproffitt/django-cms,czpython/django-cms,SachaMPS/django-cms,chmberl/django-cms,nostalgiaz/django-cms,bittner/django-cms,SachaMPS/django-cms,chkir/django-cms,nostalgiaz/django-cms,Vegasvikk/django-cms,FinalAngel/django-cms,wuzhihui1123/django-cms,SachaMPS/django-cms,chmberl/django-cms,nostalgiaz/django-cms,bittner/django-cms,SachaMPS/django-cms,chkir/django-cms,nostalgiaz/django-cms,Vegasvikk/django-cms,FinalAngel/django-cms,wuzhihui1123/django-cms,Livefyre/django-cms,dhorelik/django-cms,netzkolchose/django-cms,360youlun/django-cms,cyberintruder/django-cms,jproffitt/django-cms,frnhr/django-cms,intip/django-cms,takeshineshiro/django-cms,robmagee/django-cms,qnub/django-cms,liuyisiyisi/django-cms,SofiaReis/django-cms,Vegasvikk/django-cms,FinalAngel/django-cms,wuzhihui1123/django-cms,frnhr/django-cms,nimbis/django-cms,jrief/django-cms,FinalAngel/django-cms,jproffitt/django-cms,frnhr/django-cms,netzkolchose/django-cms,360youlun/django-cms,stefanw/django-cms,owers19856/django-cms,nimbis/django-cms,jrief/django-cms,FinalAngel/django-cms,wyg3958/django-cms,farhaadila/django-cms,netzkolchose/django-cms,360youlun/django-cms,Jaccorot/django-cms,iddqd1/django-cms,andyzsf/django-cms,sephii/django-cms,yakky/django-cms,sznekol/django-cms,stefanfoulis/django-cms,andyzsf/django-cms,andyzsf/django-cms,SmithsonianEnterprises/django-cms,benzkji/django-cms,irudayarajisawa/django-cms,youprofit/django-cms,selecsosi/django-cms,philippze/django-cms,timgraham/django-cms,cyberintruder/django-cms,stefanfoulis/django-cms,jrclaramunt/django-cms,memnonila/django-cms,SofiaReis/django-cms,rscnt/django-cms,rryan/django-cms,rryan/django-cms,youprofit/django-cms,divio/django-cms,wuzhihui1123/django-cms,isotoma/django-cms,jrief/django-cms,sephii/django-cms,rryan/django-cms,DylannCordel/django-cms,owers19856/django-cms
<REPLACE_OLD> -*- from <REPLACE_NEW> -*- from <REPLACE_END>
<INSERT> django.db.models import Q
from <INSERT_END>
<REPLACE_OLD> translation from <REPLACE_NEW> translation

from <REPLACE_END>
<INSERT> #
        # It is counter-productive to provide entries for:
        # > Pages which redirect:
        #   - If the page redirects to another page on this site, the
        #     destination page will already be in the sitemap, and
        #   - If the page redirects externally, then it shouldn't be
        #     part of our sitemap anyway.
        # > Pages which cannot be accessed by anonymous users (like
        #   search engines are).
        #
        <INSERT_END>
<REPLACE_OLD> Title.objects.public().filter(page__login_required=False) <REPLACE_NEW> Title.objects.public().filter(
            Q(redirect='') | Q(redirect__isnull=True),
            page__login_required=False
        ) <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
from django.contrib.sitemaps import Sitemap
from django.db.models import Q
from django.utils import translation

from cms.models import Title


def from_iterable(iterables):
    """
    Backport of itertools.chain.from_iterable
    """
    for it in iterables:
        for element in it:
            yield element


class CMSSitemap(Sitemap):
    changefreq = "monthly"
    priority = 0.5

    def items(self):
        #
        # It is counter-productive to provide entries for:
        # > Pages which redirect:
        #   - If the page redirects to another page on this site, the
        #     destination page will already be in the sitemap, and
        #   - If the page redirects externally, then it shouldn't be
        #     part of our sitemap anyway.
        # > Pages which cannot be accessed by anonymous users (like
        #   search engines are).
        #
        all_titles = Title.objects.public().filter(
            Q(redirect='') | Q(redirect__isnull=True),
            page__login_required=False
        )
        return all_titles

    def lastmod(self, title):
        modification_dates = [title.page.changed_date, title.page.publication_date]
        plugins_for_placeholder = lambda placeholder: placeholder.get_plugins()
        plugins = from_iterable(map(plugins_for_placeholder, title.page.placeholders.all()))
        plugin_modification_dates = map(lambda plugin: plugin.changed_date, plugins)
        modification_dates.extend(plugin_modification_dates)
        return max(modification_dates)

    def location(self, title):
        translation.activate(title.language)
        url = title.page.get_absolute_url(title.language)
        translation.deactivate()
        return url
Remove redirected pages from the sitemap # -*- coding: utf-8 -*-
from django.contrib.sitemaps import Sitemap
from django.utils import translation

from cms.models import Title


def from_iterable(iterables):
    """
    Backport of itertools.chain.from_iterable
    """
    for it in iterables:
        for element in it:
            yield element


class CMSSitemap(Sitemap):
    changefreq = "monthly"
    priority = 0.5

    def items(self):
        all_titles = Title.objects.public().filter(page__login_required=False)
        return all_titles

    def lastmod(self, title):
        modification_dates = [title.page.changed_date, title.page.publication_date]
        plugins_for_placeholder = lambda placeholder: placeholder.get_plugins()
        plugins = from_iterable(map(plugins_for_placeholder, title.page.placeholders.all()))
        plugin_modification_dates = map(lambda plugin: plugin.changed_date, plugins)
        modification_dates.extend(plugin_modification_dates)
        return max(modification_dates)

    def location(self, title):
        translation.activate(title.language)
        url = title.page.get_absolute_url(title.language)
        translation.deactivate()
        return url
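A small sketch of the Q-object logic this commit introduces; it assumes only that Django is installed, since Q expressions can be built and inspected without a database or configured settings:

from django.db.models import Q

# Positional Q arguments and keyword arguments to .filter() are ANDed, so
# the combined condition reads: (redirect = '' OR redirect IS NULL)
# AND login_required = False.
redirect_free = Q(redirect='') | Q(redirect__isnull=True)
print(redirect_free)  # roughly: (OR: ('redirect', ''), ('redirect__isnull', True))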
e7a771011e93660c811effb8357df035bae8f9a6
pentai/gui/settings_screen.py
pentai/gui/settings_screen.py
from kivy.uix.screenmanager import Screen
#from kivy.properties import *
from kivy.uix.settings import SettingSpacer

from my_setting import *
import audio as a_m


class SettingsScreen(Screen):
    def __init__(self, *args, **kwargs):
        super(SettingsScreen, self).__init__(*args, **kwargs)

    def adjust_volumes(self, *args):
        a_m.adjust_volumes()

    def set_confirmation_popups(self, *args):
        self.app.set_confirmation_popups()
from kivy.uix.screenmanager import Screen
#from kivy.properties import *
from kivy.uix.settings import SettingSpacer

from my_setting import *
import audio as a_m

from kivy.uix.widget import Widget


class HSpacer(Widget):
    pass


class VSpacer(Widget):
    pass


class SettingsScreen(Screen):
    def __init__(self, *args, **kwargs):
        super(SettingsScreen, self).__init__(*args, **kwargs)

    def adjust_volumes(self, *args):
        a_m.adjust_volumes()

    def set_confirmation_popups(self, *args):
        self.app.set_confirmation_popups()
Use our own spacer widgets
Use our own spacer widgets
Python
mit
cropleyb/pentai,cropleyb/pentai,cropleyb/pentai
<REPLACE_OLD> a_m


class <REPLACE_NEW> a_m

from kivy.uix.widget import Widget


class HSpacer(Widget):
    pass


class VSpacer(Widget):
    pass


class <REPLACE_END> <|endoftext|> from kivy.uix.screenmanager import Screen
#from kivy.properties import *
from kivy.uix.settings import SettingSpacer

from my_setting import *
import audio as a_m

from kivy.uix.widget import Widget


class HSpacer(Widget):
    pass


class VSpacer(Widget):
    pass


class SettingsScreen(Screen):
    def __init__(self, *args, **kwargs):
        super(SettingsScreen, self).__init__(*args, **kwargs)

    def adjust_volumes(self, *args):
        a_m.adjust_volumes()

    def set_confirmation_popups(self, *args):
        self.app.set_confirmation_popups()
Use our own spacer widgets from kivy.uix.screenmanager import Screen
#from kivy.properties import *
from kivy.uix.settings import SettingSpacer

from my_setting import *
import audio as a_m


class SettingsScreen(Screen):
    def __init__(self, *args, **kwargs):
        super(SettingsScreen, self).__init__(*args, **kwargs)

    def adjust_volumes(self, *args):
        a_m.adjust_volumes()

    def set_confirmation_popups(self, *args):
        self.app.set_confirmation_popups()
3220b356297ec5fe61888a906543d0ee993f9f31
website/tests/test_database.py
website/tests/test_database.py
import database


def test_encode_csv():
    attributes = (
        # strand, ref, alt, cdna_pos, exon, protein_id, is_ptm
        '+', 'R', 'H', 204, 'exon1', 123, False
    )
    result = database.encode_csv(*attributes)
    assert result == '+RH0cc:exon1:7b'


def test_decode_csv():
    encoded_csv = '+RH0cc:exon1:7b'
    result = database.decode_csv(encoded_csv)
    assert result == dict(zip(
        ('strand', 'ref', 'alt', 'pos', 'cdna_pos', 'exon', 'protein_id', 'is_ptm'),
        ('+', 'R', 'H', 68, 204, 'exon1', 123, False)
    ))
import database


def test_encode_csv():
    test_data = (
        # strand, ref, alt, cdna_pos, exon, protein_id, is_ptm
        (('+', 'R', 'H', 204, 'exon1', 123, False), '+RH0cc:exon1:7b'),
        (('-', 'R', 'H', 204, 'exon1', 123, True), '-RH1cc:exon1:7b'),
    )
    for attributes, correct_result in test_data:
        result = database.encode_csv(*attributes)
        assert result == correct_result


def test_decode_csv():
    keys = ('strand', 'ref', 'alt', 'pos', 'cdna_pos', 'exon', 'protein_id', 'is_ptm')
    test_data = (
        ('+RH0cc:exon1:7b', ('+', 'R', 'H', 68, 204, 'exon1', 123, False)),
        ('-RH1cc:exon1:7b', ('-', 'R', 'H', 68, 204, 'exon1', 123, True)),
    )
    for encoded_csv, correct_result in test_data:
        result = database.decode_csv(encoded_csv)
        assert result == dict(zip(keys, correct_result))
Add more tests to database
Add more tests to database
Python
lgpl-2.1
reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualisation-Framework-for-Genome-Mutations,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB,reimandlab/Visualistion-Framework-for-Genome-Mutations,reimandlab/ActiveDriverDB
<REPLACE_OLD> attributes <REPLACE_NEW> test_data <REPLACE_END>
<REPLACE_OLD> '+', <REPLACE_NEW> (('+', <REPLACE_END>
<REPLACE_OLD> False <REPLACE_NEW> False), '+RH0cc:exon1:7b'),
        (('-', 'R', 'H', 204, 'exon1', 123, True), '-RH1cc:exon1:7b'), <REPLACE_END>
<INSERT> for attributes, correct_result in test_data:
        <INSERT_END> <INSERT> <INSERT_END>
<REPLACE_OLD> '+RH0cc:exon1:7b' def <REPLACE_NEW> correct_result


def <REPLACE_END>
<REPLACE_OLD> encoded_csv <REPLACE_NEW> keys <REPLACE_END>
<REPLACE_OLD> '+RH0cc:exon1:7b' <REPLACE_NEW> ('strand', 'ref', 'alt', 'pos', 'cdna_pos', 'exon', 'protein_id', 'is_ptm')
    test_data = (
        ('+RH0cc:exon1:7b', ('+', 'R', 'H', 68, 204, 'exon1', 123, False)),
        ('-RH1cc:exon1:7b', ('-', 'R', 'H', 68, 204, 'exon1', 123, True)),
    )
    for encoded_csv, correct_result in test_data: <REPLACE_END>
<DELETE> assert result == dict(zip( <DELETE_END>
<REPLACE_OLD> ('strand', 'ref', 'alt', 'pos', 'cdna_pos', 'exon', 'protein_id', 'is_ptm'),
        ('+', 'R', 'H', 68, 204, 'exon1', 123, False)
    )) <REPLACE_NEW> assert result == dict(zip(keys, correct_result)) <REPLACE_END> <|endoftext|> import database


def test_encode_csv():
    test_data = (
        # strand, ref, alt, cdna_pos, exon, protein_id, is_ptm
        (('+', 'R', 'H', 204, 'exon1', 123, False), '+RH0cc:exon1:7b'),
        (('-', 'R', 'H', 204, 'exon1', 123, True), '-RH1cc:exon1:7b'),
    )
    for attributes, correct_result in test_data:
        result = database.encode_csv(*attributes)
        assert result == correct_result


def test_decode_csv():
    keys = ('strand', 'ref', 'alt', 'pos', 'cdna_pos', 'exon', 'protein_id', 'is_ptm')
    test_data = (
        ('+RH0cc:exon1:7b', ('+', 'R', 'H', 68, 204, 'exon1', 123, False)),
        ('-RH1cc:exon1:7b', ('-', 'R', 'H', 68, 204, 'exon1', 123, True)),
    )
    for encoded_csv, correct_result in test_data:
        result = database.decode_csv(encoded_csv)
        assert result == dict(zip(keys, correct_result))
Add more tests to database import database


def test_encode_csv():
    attributes = (
        # strand, ref, alt, cdna_pos, exon, protein_id, is_ptm
        '+', 'R', 'H', 204, 'exon1', 123, False
    )
    result = database.encode_csv(*attributes)
    assert result == '+RH0cc:exon1:7b'


def test_decode_csv():
    encoded_csv = '+RH0cc:exon1:7b'
    result = database.decode_csv(encoded_csv)
    assert result == dict(zip(
        ('strand', 'ref', 'alt', 'pos', 'cdna_pos', 'exon', 'protein_id', 'is_ptm'),
        ('+', 'R', 'H', 68, 204, 'exon1', 123, False)
    ))
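The rewrite in this record uses a table-driven loop; a hedged alternative sketch using pytest.mark.parametrize, with a stand-in encode function (not the project's database.encode_csv) so it runs on its own:

import pytest

def encode(strand, ref, alt):  # stand-in for database.encode_csv
    return '{0}{1}{2}'.format(strand, ref, alt)

@pytest.mark.parametrize('attributes, expected', [
    (('+', 'R', 'H'), '+RH'),
    (('-', 'R', 'H'), '-RH'),
])
def test_encode(attributes, expected):
    # Each tuple becomes its own test case, so one failure does not stop
    # the remaining cases the way a plain for-loop over tuples would.
    assert encode(*attributes) == expected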
6fa13c56c38b14226d1902f8d686241ed88b875a
satnogsclient/scheduler/tasks.py
satnogsclient/scheduler/tasks.py
# -*- coding: utf-8 -*-
from urlparse import urljoin

import requests
from dateutil import parser

from satnogsclient import settings
from satnogsclient.scheduler import scheduler


def spawn_observation(*args, **kwargs):
    raise NotImplementedError


def get_jobs():
    """Query SatNOGS Network API to GET jobs."""
    url = urljoin(settings.NETWORK_API_URL, 'jobs')
    params = {'ground_station': settings.GROUND_STATION_ID}
    response = requests.get(url, params=params)
    if not response.status_code == 200:
        raise Exception('Status code: {0} on request: {1}'.format(response.status_code, url))

    for job in scheduler.get_jobs():
        if job.name == spawn_observation.__name__:
            job.remove()

    for obj in response.json():
        start = parser.parse(obj['start'])
        job_id = str(obj['id'])
        scheduler.add_job(spawn_observation,
                          'date',
                          run_date=start,
                          id=job_id,
                          kwargs=obj)
# -*- coding: utf-8 -*-
from urlparse import urljoin

import requests
from dateutil import parser

from satnogsclient import settings
from satnogsclient.observer import Observer
from satnogsclient.scheduler import scheduler


def spawn_observation(*args, **kwargs):
    obj = kwargs.pop('obj')
    observer = Observer()
    tle = {
        'tle0': obj['tle0'],
        'tle1': obj['tle1'],
        'tle2': obj['tle2']
    }
    end = parser.parse(obj['end'])
    observer.setup(tle=tle, observation_end=end, frequency=obj['frequency'])
    observer.observe()


def get_jobs():
    """Query SatNOGS Network API to GET jobs."""
    url = urljoin(settings.NETWORK_API_URL, 'jobs')
    params = {'ground_station': settings.GROUND_STATION_ID}
    response = requests.get(url, params=params)
    if not response.status_code == 200:
        raise Exception('Status code: {0} on request: {1}'.format(response.status_code, url))

    for job in scheduler.get_jobs():
        if job.name == spawn_observation.__name__:
            job.remove()

    for obj in response.json():
        start = parser.parse(obj['start'])
        job_id = str(obj['id'])
        kwargs = {'obj': obj}
        scheduler.add_job(spawn_observation,
                          'date',
                          run_date=start,
                          id=job_id,
                          kwargs=kwargs)
Initialize and call observer on new observation task.
Initialize and call observer on new observation task.
Python
agpl-3.0
adamkalis/satnogs-client,cshields/satnogs-client,cshields/satnogs-client,adamkalis/satnogs-client
<INSERT> satnogsclient.observer import Observer
from <INSERT_END>
<REPLACE_OLD> raise NotImplementedError


def <REPLACE_NEW> obj = kwargs.pop('obj')
    observer = Observer()
    tle = {
        'tle0': obj['tle0'],
        'tle1': obj['tle1'],
        'tle2': obj['tle2']
    }
    end = parser.parse(obj['end'])
    observer.setup(tle=tle, observation_end=end, frequency=obj['frequency'])
    observer.observe()


def <REPLACE_END>
<INSERT> kwargs = {'obj': obj}
        <INSERT_END>
<REPLACE_OLD> kwargs=obj) <REPLACE_NEW> kwargs=kwargs) <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
from urlparse import urljoin

import requests
from dateutil import parser

from satnogsclient import settings
from satnogsclient.observer import Observer
from satnogsclient.scheduler import scheduler


def spawn_observation(*args, **kwargs):
    obj = kwargs.pop('obj')
    observer = Observer()
    tle = {
        'tle0': obj['tle0'],
        'tle1': obj['tle1'],
        'tle2': obj['tle2']
    }
    end = parser.parse(obj['end'])
    observer.setup(tle=tle, observation_end=end, frequency=obj['frequency'])
    observer.observe()


def get_jobs():
    """Query SatNOGS Network API to GET jobs."""
    url = urljoin(settings.NETWORK_API_URL, 'jobs')
    params = {'ground_station': settings.GROUND_STATION_ID}
    response = requests.get(url, params=params)
    if not response.status_code == 200:
        raise Exception('Status code: {0} on request: {1}'.format(response.status_code, url))

    for job in scheduler.get_jobs():
        if job.name == spawn_observation.__name__:
            job.remove()

    for obj in response.json():
        start = parser.parse(obj['start'])
        job_id = str(obj['id'])
        kwargs = {'obj': obj}
        scheduler.add_job(spawn_observation,
                          'date',
                          run_date=start,
                          id=job_id,
                          kwargs=kwargs)
Initialize and call observer on new observation task. # -*- coding: utf-8 -*-
from urlparse import urljoin

import requests
from dateutil import parser

from satnogsclient import settings
from satnogsclient.scheduler import scheduler


def spawn_observation(*args, **kwargs):
    raise NotImplementedError


def get_jobs():
    """Query SatNOGS Network API to GET jobs."""
    url = urljoin(settings.NETWORK_API_URL, 'jobs')
    params = {'ground_station': settings.GROUND_STATION_ID}
    response = requests.get(url, params=params)
    if not response.status_code == 200:
        raise Exception('Status code: {0} on request: {1}'.format(response.status_code, url))

    for job in scheduler.get_jobs():
        if job.name == spawn_observation.__name__:
            job.remove()

    for obj in response.json():
        start = parser.parse(obj['start'])
        job_id = str(obj['id'])
        scheduler.add_job(spawn_observation,
                          'date',
                          run_date=start,
                          id=job_id,
                          kwargs=obj)
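A self-contained sketch of the scheduling pattern this record relies on, assuming APScheduler 3.x; the scheduler object and the payload below are invented for illustration and are not SatNOGS code:

import time
from datetime import datetime, timedelta

from apscheduler.schedulers.background import BackgroundScheduler

def spawn(*args, **kwargs):
    obj = kwargs.pop('obj')  # unpacked exactly like spawn_observation above
    print('observing job', obj['id'])

scheduler = BackgroundScheduler()
obj = {'id': 42}  # invented sample payload
scheduler.add_job(spawn, 'date',
                  run_date=datetime.now() + timedelta(seconds=1),
                  id=str(obj['id']),
                  kwargs={'obj': obj})
scheduler.start()
time.sleep(2)  # keep the process alive long enough for the job to fire
scheduler.shutdown()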
31d2686555a93ddadd3713c3c880b75641d98d89
scripts/read_reldist.py
scripts/read_reldist.py
import os
import yaml

from thermof.parameters import plot_parameters
from thermof.read import read_framework_distance

# --------------------------------------------------------------------------------------------------
main = ''
results_file = '%s-reldist-results.yaml' % os.path.basename(main)
run_list_file = '%s-run-list.yaml' % os.path.basename(main)
# --------------------------------------------------------------------------------------------------
run_list = [os.path.join(main, i, 'Run1') for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
dist_data = read_framework_distance(run_list, plot_parameters['f_dist'])
with open(results_file, 'w') as rfile:
    yaml.dump(dist_data, rfile)
with open(run_list_file, 'w') as rlfile:
    yaml.dump(run_list, rlfile)
Add script for reading reldist for multiple trials
Add script for reading reldist for multiple trials
Python
mit
kbsezginel/tee_mof,kbsezginel/tee_mof
<INSERT> import os
import yaml

from thermof.parameters import plot_parameters
from thermof.read import read_framework_distance

# --------------------------------------------------------------------------------------------------
main = ''
results_file = '%s-reldist-results.yaml' % os.path.basename(main)
run_list_file = '%s-run-list.yaml' % os.path.basename(main)
# --------------------------------------------------------------------------------------------------
run_list = [os.path.join(main, i, 'Run1') for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
dist_data = read_framework_distance(run_list, plot_parameters['f_dist'])
with open(results_file, 'w') as rfile:
    <INSERT_END> <INSERT> yaml.dump(dist_data, rfile)
with open(run_list_file, 'w') as rlfile:
    yaml.dump(run_list, rlfile) <INSERT_END> <|endoftext|> import os
import yaml

from thermof.parameters import plot_parameters
from thermof.read import read_framework_distance

# --------------------------------------------------------------------------------------------------
main = ''
results_file = '%s-reldist-results.yaml' % os.path.basename(main)
run_list_file = '%s-run-list.yaml' % os.path.basename(main)
# --------------------------------------------------------------------------------------------------
run_list = [os.path.join(main, i, 'Run1') for i in os.listdir(main) if os.path.isdir(os.path.join(main, i))]
dist_data = read_framework_distance(run_list, plot_parameters['f_dist'])
with open(results_file, 'w') as rfile:
    yaml.dump(dist_data, rfile)
with open(run_list_file, 'w') as rlfile:
    yaml.dump(run_list, rlfile)
Add script for reading reldist for multiple trials
43c3a8a94c7783aadb440e529645f7db7c7913ff
successstories/forms.py
successstories/forms.py
from django import forms

from .models import Story

from cms.forms import ContentManageableModelForm


class StoryForm(ContentManageableModelForm):

    class Meta:
        model = Story
        fields = (
            'name',
            'company_name',
            'company_url',
            'category',
            'author',
            'author_email',
            'pull_quote',
            'content'
        )

    def clean_name(self):
        name = self.cleaned_data.get('name')
        story = Story.objects.filter(name=name).exclude(pk=self.instance.pk)
        if name is not None and story.exists():
            raise forms.ValidationError('Please use a unique name.')
        return name
from django import forms

from .models import Story

from cms.forms import ContentManageableModelForm


class StoryForm(ContentManageableModelForm):
    pull_quote = forms.CharField(widget=forms.Textarea(attrs={'rows': 5}))

    class Meta:
        model = Story
        fields = (
            'name',
            'company_name',
            'company_url',
            'category',
            'author',
            'author_email',
            'pull_quote',
            'content'
        )

    def clean_name(self):
        name = self.cleaned_data.get('name')
        story = Story.objects.filter(name=name).exclude(pk=self.instance.pk)
        if name is not None and story.exists():
            raise forms.ValidationError('Please use a unique name.')
        return name
Reduce textarea height in Story form
Reduce textarea height in Story form
Python
apache-2.0
proevo/pythondotorg,manhhomienbienthuy/pythondotorg,manhhomienbienthuy/pythondotorg,Mariatta/pythondotorg,python/pythondotorg,python/pythondotorg,Mariatta/pythondotorg,manhhomienbienthuy/pythondotorg,proevo/pythondotorg,Mariatta/pythondotorg,python/pythondotorg,manhhomienbienthuy/pythondotorg,python/pythondotorg,Mariatta/pythondotorg,proevo/pythondotorg,proevo/pythondotorg
<INSERT> pull_quote = forms.CharField(widget=forms.Textarea(attrs={'rows': 5}))

    <INSERT_END> <|endoftext|> from django import forms

from .models import Story

from cms.forms import ContentManageableModelForm


class StoryForm(ContentManageableModelForm):
    pull_quote = forms.CharField(widget=forms.Textarea(attrs={'rows': 5}))

    class Meta:
        model = Story
        fields = (
            'name',
            'company_name',
            'company_url',
            'category',
            'author',
            'author_email',
            'pull_quote',
            'content'
        )

    def clean_name(self):
        name = self.cleaned_data.get('name')
        story = Story.objects.filter(name=name).exclude(pk=self.instance.pk)
        if name is not None and story.exists():
            raise forms.ValidationError('Please use a unique name.')
        return name
Reduce textarea height in Story form from django import forms

from .models import Story

from cms.forms import ContentManageableModelForm


class StoryForm(ContentManageableModelForm):

    class Meta:
        model = Story
        fields = (
            'name',
            'company_name',
            'company_url',
            'category',
            'author',
            'author_email',
            'pull_quote',
            'content'
        )

    def clean_name(self):
        name = self.cleaned_data.get('name')
        story = Story.objects.filter(name=name).exclude(pk=self.instance.pk)
        if name is not None and story.exists():
            raise forms.ValidationError('Please use a unique name.')
        return name
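A standalone sketch of the widget override shown in this record; the bare settings.configure() call is only there so the form can render outside a Django project, and the exact rendered attributes vary by Django version:

import django
from django.conf import settings

settings.configure()  # bare-bones setup; no database or apps needed
django.setup()

from django import forms

class DemoForm(forms.Form):
    # Same idea as StoryForm: declaring the field explicitly swaps the
    # default TextInput for a five-row Textarea.
    pull_quote = forms.CharField(widget=forms.Textarea(attrs={'rows': 5}))

print(DemoForm()['pull_quote'])
# e.g. <textarea name="pull_quote" cols="40" rows="5" ...></textarea>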
5012ff1dfbcd8e7d0d9b0691f45c7b3efd811a08
adventure/__init__.py
adventure/__init__.py
"""The Adventure game.""" def load_advent_dat(data): import os from .data import parse datapath = os.path.join(os.path.dirname(__file__), 'advent.dat') with open(datapath, 'r', encoding='ascii') as datafile: parse(data, datafile) def play(seed=None): """Turn the Python prompt into an Adventure game. With `seed` the caller can supply an integer to start the random number generator at a known state. When `quiet` is true, no output is printed as the game is played; the caller of a command has to manually check `_game.output` for the result, which makes it possible to write very quiet tests. """ global _game from .game import Game from .prompt import install_words _game = Game(seed) load_advent_dat(_game) install_words(_game) _game.start() print(_game.output[:-1]) def resume(savefile, quiet=False): global _game from .game import Game from .prompt import install_words _game = Game.resume(savefile) install_words(_game) if not quiet: print('GAME RESTORED\n')
"""The Adventure game.""" def load_advent_dat(data): import os from .data import parse datapath = os.path.join(os.path.dirname(__file__), 'advent.dat') with open(datapath, 'r', encoding='ascii') as datafile: parse(data, datafile) def play(seed=None): """Turn the Python prompt into an Adventure game. With optional the `seed` argument the caller can supply an integer to start the Python random number generator at a known state. """ global _game from .game import Game from .prompt import install_words _game = Game(seed) load_advent_dat(_game) install_words(_game) _game.start() print(_game.output[:-1]) def resume(savefile, quiet=False): global _game from .game import Game from .prompt import install_words _game = Game.resume(savefile) install_words(_game) if not quiet: print('GAME RESTORED\n')
Remove outdated parameter from docstring
Remove outdated parameter from docstring
Python
apache-2.0
devinmcgloin/advent,devinmcgloin/advent
<INSERT> the optional <INSERT_END> <INSERT> argument <INSERT_END>
<REPLACE_OLD> integer <REPLACE_NEW> integer <REPLACE_END>
<REPLACE_OLD> random <REPLACE_NEW> Python random <REPLACE_END>
<REPLACE_OLD> state. When `quiet` is true, no output is printed as the game is played;
    the caller of a command has to manually check `_game.output` for the
    result, which makes it possible to write very quiet tests. <REPLACE_NEW> state. <REPLACE_END> <|endoftext|> """The Adventure game."""

def load_advent_dat(data):
    import os
    from .data import parse

    datapath = os.path.join(os.path.dirname(__file__), 'advent.dat')
    with open(datapath, 'r', encoding='ascii') as datafile:
        parse(data, datafile)

def play(seed=None):
    """Turn the Python prompt into an Adventure game.

    With the optional `seed` argument the caller can supply an integer
    to start the Python random number generator at a known state.

    """
    global _game

    from .game import Game
    from .prompt import install_words

    _game = Game(seed)
    load_advent_dat(_game)
    install_words(_game)
    _game.start()
    print(_game.output[:-1])

def resume(savefile, quiet=False):
    global _game

    from .game import Game
    from .prompt import install_words

    _game = Game.resume(savefile)
    install_words(_game)
    if not quiet:
        print('GAME RESTORED\n')
Remove outdated parameter from docstring """The Adventure game."""

def load_advent_dat(data):
    import os
    from .data import parse

    datapath = os.path.join(os.path.dirname(__file__), 'advent.dat')
    with open(datapath, 'r', encoding='ascii') as datafile:
        parse(data, datafile)

def play(seed=None):
    """Turn the Python prompt into an Adventure game.

    With `seed` the caller can supply an integer to start the random
    number generator at a known state.

    When `quiet` is true, no output is printed as the game is played;
    the caller of a command has to manually check `_game.output` for the
    result, which makes it possible to write very quiet tests.

    """
    global _game

    from .game import Game
    from .prompt import install_words

    _game = Game(seed)
    load_advent_dat(_game)
    install_words(_game)
    _game.start()
    print(_game.output[:-1])

def resume(savefile, quiet=False):
    global _game

    from .game import Game
    from .prompt import install_words

    _game = Game.resume(savefile)
    install_words(_game)
    if not quiet:
        print('GAME RESTORED\n')