text | meta
---|---
from setuptools import setup
setup(name='learningml',
version='0.3',
description='This repository demonstrates how to make a project pip installable, write a Python module in C and use scikit-learn, keras and spearmint.',
url='https://github.com/weissercn/learningml',
author='Constantin Weisser',
author_email='[email protected]',
license='MIT',
packages=['learningml'],
install_requires=[
'numpy',
'scipy',
'scikit-learn',
'keras',
],
zip_safe=False)
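# Hedged usage note (my own addition, not part of the original setup.py): with this
# file at the repository root, the package declared above can be installed locally with
#   pip install .
# or, for development, in editable mode with
#   pip install -e .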
| {
"content_hash": "d50f0e0f21933631ac1dd149c2f1d088",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 158,
"avg_line_length": 32.705882352941174,
"alnum_prop": 0.6187050359712231,
"repo_name": "weissercn/learningml",
"id": "72bc8fa0b6e920aa6bfabbd030a60a5584c2cc6a",
"size": "556",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "902"
},
{
"name": "Python",
"bytes": "580027"
},
{
"name": "Shell",
"bytes": "191"
}
],
"symlink_target": ""
} |
import pytest
from .config import *
@pytest.fixture
def app():
main.app.config["TESTING"] = True
main.app.config["SQLALCHEMY_DATABASE_URI"] = TEST_DATABASE_URI
main.db.create_all()
return main.app
@pytest.fixture
def test_client(app):
return app.test_client()
def test_hello(test_client):
assert isinstance(test_client, FlaskClient)
def test_keyboard(test_client):
response = test_client.get("/keyboard")
check_response(response)
keyboard = json.loads(response.data)
assert keyboard["type"] == "buttons"
assert keyboard["buttons"] == ["ํ๋ฒํผ 1", "ํ๋ฒํผ 2", "ํ๋ฒํผ 3"]
def test_fail_message(test_client):
data = dict(
user_key="test_id"
)
json_data = json.dumps(data)
response = test_client.post("/message", data=json_data, content_type="application/json")
assert response.status_code == 400
message = json.loads(response.data)
assert message["message"]["text"] == "์ค๋ฅ๊ฐ ๋ฐ์ํ์์ต๋๋ค."
def test_message(test_client):
data = dict(
user_key="test_id",
type="text",
content="ํ๋ฒํผ 1"
)
json_data = json.dumps(data)
response = test_client.post("/message", data=json_data, content_type="application/json")
check_response(response)
message = json.loads(response.data)
assert message["message"]["text"] == "๊ธฐ๋ณธ ๋ฉ์์ง"
assert message["keyboard"]["type"] == "buttons"
assert message["keyboard"]["buttons"] == ["ํ๋ฒํผ 1", "ํ๋ฒํผ 2", "ํ๋ฒํผ 3"]
def test_add_friend(test_client):
data = dict(
user_key="test_id"
)
json_data = json.dumps(data)
response = test_client.post("/friend", data=json_data, content_type="application/json")
check_success_response(response)
def test_block_friend(test_client):
user_key = "test_id"
response = test_client.delete("/friend/{}".format(user_key))
check_success_response(response)
def test_exit_chatroom(test_client):
user_key = "test_id"
response = test_client.delete("/chat_room/{}".format(user_key))
check_success_response(response)
def test_remove_keyboard(test_client):
msg = message.BaseMessage()
msg.remove_keyboard()
assert "keyboard" not in msg.get_message()
def test_add_photo(test_client):
msg = message.BaseMessage()
url = "https://www.python.org/static/img/python-logo.png"
msg.add_photo(url, 320, 240)
assert "photo" in msg.get_message()["message"]
assert url == msg.get_message()["message"]["photo"]["url"]
assert 320 == msg.get_message()["message"]["photo"]["width"]
assert 240 == msg.get_message()["message"]["photo"]["height"]
def test_add_message_button(test_client):
msg = message.BaseMessage()
url = "https://www.ruby-lang.org/ko/"
msg.add_message_button(url, "๋ฃจ๋น")
assert "message_button" in msg.get_message()["message"]
assert url == msg.get_message()["message"]["message_button"]["url"]
assert "๋ฃจ๋น" == msg.get_message()["message"]["message_button"]["label"]
def test_update_message(test_client):
msg = message.BaseMessage()
msg.update_message("ํ์ด์ฌ")
assert "ํ์ด์ฌ" == msg.get_message()["message"]["text"]
def test_update_keyboard(test_client):
msg = message.BaseMessage()
msg.update_keyboard(["ํ์ด์ฌ", "๋ฃจ๋น", "์ํฌ"])
assert ["ํ์ด์ฌ", "๋ฃจ๋น", "์ํฌ"] == msg.get_message()["keyboard"]["buttons"]
def check_success_response(response):
check_response(response)
message = json.loads(response.data)
assert message["message"] == "SUCCESS"
assert message["comment"] == "์ ์ ์๋ต"
def check_response(response):
assert response.status_code == 200
assert response.content_type == "application/json"
assert isinstance(response.data, bytes)
assert isinstance(json.loads(response.data), dict)
| {
"content_hash": "39385a8c95297978a95a97268793130d",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 92,
"avg_line_length": 29.385826771653544,
"alnum_prop": 0.6543408360128617,
"repo_name": "JungWinter/yellowid-flask",
"id": "1a8683fdb1d1d0da7e6efa1309788057c6bec6c6",
"size": "3860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_routing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "20052"
}
],
"symlink_target": ""
} |
"""Exception types that can be raised by Gnuplot.py."""
class Error(Exception):
"""All our exceptions are derived from this one."""
pass
class OptionError(Error):
"""Raised for unrecognized option(s)"""
pass
class DataError(Error):
"""Raised for data in the wrong format"""
pass
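# Hedged usage sketch (my own illustration, not part of Gnuplot.py): because every
# exception above derives from Error, callers can trap any Gnuplot.py failure with a
# single except clause while still distinguishing the specific subclasses.
if __name__ == '__main__':
    try:
        raise OptionError('unrecognized option: "with lines3"')
    except DataError as e:
        print('bad data: %s' % e)
    except Error as e:
        print('Gnuplot.py error: %s' % e)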
| {
"content_hash": "6a353aae5d824d5fcade124df1fe05db",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 55,
"avg_line_length": 17.27777777777778,
"alnum_prop": 0.6591639871382636,
"repo_name": "pombredanne/ompc",
"id": "0a4e27f2f312c23032ba09cf9f2f31160f7c8835",
"size": "546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ompclib/gplot/Errors.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "250"
},
{
"name": "Python",
"bytes": "498364"
}
],
"symlink_target": ""
} |
from cgum.utility import *
from cgum.program import Program
import codecs
import json
import tempfile
from subprocess import Popen
class Mappings(object):
@staticmethod
def from_json(jsn):
before_to_after = dict()
after_to_before = dict()
for m in jsn:
src = int(m['src'])
dest = int(m['dest'])
before_to_after[src] = dest
after_to_before[dest] = src
return Mappings(before_to_after, after_to_before)
def __init__(self, before_to_after, after_to_before):
self.before_to_after = before_to_after
self.after_to_before = after_to_before
# Given the number of a node in P, returns the number of the matching node
# in P', or None if no such match exists.
def after(self, num):
return self.before_to_after.get(num, None)
# Given the number of a node in P', returns the number of the matching node
# in P, or None if no such match exists.
def before(self, num):
return self.after_to_before.get(num, None)
class Action(object):
@staticmethod
def from_json_with_mappings(jsn, mapping):
return ({
'insert': Insert,
'update': Update,
'move': Move,
'delete': Delete
})[jsn['action']].from_json_with_mappings(jsn, mapping)
# Gives the ID of the node in the original tree that was deleted.
class Delete(Action):
@staticmethod
def from_json_with_mappings(jsn, mapping):
return Delete(jsn['tree'])
def __init__(self, node_id):
self.__deleted_id = node_id
self.__deleted = None
def annotate(self, before, after):
self.__deleted = before.find(self.__deleted_id)
# Returns the deleted node from the before AST
def deleted(self):
return self.__deleted
def __str__(self):
return "DEL(%d)" % self.__deleted_id
# Position parameter is NOT to be trusted
class Move(Action):
@staticmethod
def from_json_with_mappings(jsn, mapping):
from_id = jsn['tree']
to_id = mapping.after(from_id)
return Move(from_id, to_id, jsn['parent'], jsn['at'])
def __init__(self, from_id, to_id, parent_id, position):
self.__from_id = from_id
self.__to_id = to_id
self.__parent_id = parent_id
self.__position = position
self.__from = None
self.__to = None
# Annotates this action by recording the from and to nodes
def annotate(self, before, after):
self.__from = before.find(self.__from_id)
self.__to = after.find(self.__to_id)
# Returns the node in the before AST
def moved_from(self):
if self.__from is None:
raise Exception("moved_from: action hasn't been annotated")
return self.__from
# Returns the node in the after AST
def moved_to(self):
if self.__to is None:
raise Exception("moved_to: action hasn't been annotated")
return self.__to
# Returns the ID of the node that was moved in the before AST
    def moved_from_id(self):
        return self.__from_id
    # Returns the ID of the matching node in the after AST
    def moved_to_id(self):
        return self.__to_id
# Returns the original (incorrect) GumTree description
def __str__(self):
return "MOV(%d, %d, %d)" % \
(self.__from_id, self.__parent_id, self.__position)
# Doesn't handle insert root?
class Insert(Action):
@staticmethod
def from_json_with_mappings(jsn, mapping):
return Insert(jsn['tree'], jsn['parent'], jsn['at'])
def __init__(self, inserted_id, parent_id, position):
self.__inserted_id = inserted_id
self.__parent_id = parent_id
self.__position = position
self.__inserted = None
self.__parent = None
# Annotates this action by caching the inserted and parent nodes
def annotate(self, before, after):
self.__inserted = after.find(self.__inserted_id)
self.__parent = after.find(self.__parent_id)
# Returns the node which was inserted into the AST
def inserted(self):
return self.__inserted
# Returns the parent of the node that was inserted into the AST
def parent(self):
return self.__parent
# Returns the ID of the node that was inserted into the AST
    def inserted_id(self):
        return self.__inserted_id
def parent_id(self):
return self.__parent_id
# Returns the position that the node was inserted into its parents subtree,
# according to GumTree output; flawed.
def position(self):
return self.__position
def __str__(self):
return "INS(%d, %d, %d)" % \
(self.__inserted_id, self.__parent_id, self.__position)
class Update(Action):
@staticmethod
def from_json_with_mappings(jsn, mapping):
after_id = mapping.after(jsn['tree'])
return Update(jsn['tree'], after_id, jsn['label'])
def __init__(self, before_id, after_id, label):
self.__before_id = before_id
self.__after_id = after_id
self.__label = label
self.__before = None
self.__after = None
# Annotates this action by caching the before and after forms of the node
def annotate(self, before, after):
self.__before = before.find(self.__before_id)
self.__after = after.find(self.__after_id)
# Returns the node that was the subject of this Update operation, in P
def before(self):
return self.__before
# Returns the node that was the subject of this Update operation, in P'
def after(self):
return self.__after
# Returns the ID of the node in P
def before_id(self):
return self.__before_id
# Returns the ID of the node in P'
def after_id(self):
return self.__after_id
# Returns the updated label for this node
def label(self):
return self.__label
# Returns a string description of the operation, in its original GumTree
# encoding
def __str__(self):
return "UPD(%d, %s)" % (self.__before_id, self.__label)
class AnnotatedDiff(object):
@staticmethod
def from_source_files(fn_from, fn_to):
tmp_f = tempfile.NamedTemporaryFile()
AnnotatedDiff.parse_to_json_file(fn_from, fn_to, tmp_f)
before = Program.from_source_file(fn_from)
after = Program.from_source_file(fn_to)
return AnnotatedDiff.from_file(tmp_f.name,\
before,\
after)
@staticmethod
def parse_to_json_file(fn_from, fn_to, jsn_fn):
assert Popen(("gumtree jsondiff \"%s\" \"%s\"" % (fn_from, fn_to)), \
shell=True, stdin=FNULL, stdout=jsn_fn).wait() == 0
@staticmethod
def from_file(fn, before, after):
with codecs.open(fn, 'r', 'utf-8') as f:
return AnnotatedDiff.from_json(json.load(f), before, after)
@staticmethod
def from_json(jsn, before, after):
mappings = Mappings.from_json(jsn['matches'])
actions = \
[Action.from_json_with_mappings(a, mappings) for a in jsn['actions']]
return AnnotatedDiff(actions, mappings, before, after)
def __init__(self, actions, mappings, before, after):
self.__actions = actions
self.__mappings = mappings
self.__before = before
self.__after = after
self.__insertions = []
self.__deletions = []
self.__updates = []
self.__moves = []
# Annotate and group the actions
for action in self.__actions:
action.annotate(before, after)
({
Insert: self.__insertions,
Delete: self.__deletions,
Update: self.__updates,
Move: self.__moves
})[action.__class__].append(action)
def before(self):
return self.__before
def after(self):
return self.__after
def actions(self):
return self.__actions
def insertions(self):
return self.__insertions
def deletions(self):
return self.__deletions
def moves(self):
return self.__moves
def updates(self):
return self.__updates
def mappings(self):
return self.__mappings
# checks whether a given node in P' was moved to that location
def was_moved_to(self, to):
        return any(to == move.moved_to() for move in self.moves())
# checks whether a given node in P was moved to another location
def was_moved_from(self, frm):
        return any(frm == move.moved_from() for move in self.moves())
# Given a node in P, return the matching node in P', or None if no such
# match exists.
def was_is(self, node):
assert not node is None, "was_is: provided node must not be null"
was_num = node.number()
is_num = self.__mappings.after(was_num)
if is_num is None:
return None
else:
return self.__after.find(is_num)
# Given a node in P', return the matching node in P, or None if no such
# match exists.
def is_was(self, node):
assert not node is None, "is_was: provided node must not be null"
is_num = node.number()
was_num = self.__mappings.before(is_num)
if was_num is None:
return None
else:
return self.__before.find(was_num)
def __str__(self):
return '\n'.join(map(str, self.__actions))
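# Hedged usage sketch (my own addition; the match list below is invented, not real
# GumTree output): Mappings converts the GumTree 'matches' JSON into two lookup
# tables so node numbers can be translated between the before (P) and after (P') trees.
if __name__ == '__main__':
    _demo = Mappings.from_json([{'src': 3, 'dest': 7}, {'src': 4, 'dest': 9}])
    assert _demo.after(3) == 7       # node 3 in P matches node 7 in P'
    assert _demo.before(9) == 4      # node 9 in P' matches node 4 in P
    assert _demo.after(99) is None   # unmatched nodes map to None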
| {
"content_hash": "4b520ed7758377cbbd6eccefe784bd7b",
"timestamp": "",
"source": "github",
"line_count": 288,
"max_line_length": 81,
"avg_line_length": 32.763888888888886,
"alnum_prop": 0.5922000847816872,
"repo_name": "ChrisTimperley/PythonCGum",
"id": "2fe555bf3f8a8e05534d9f0d5bda5368d0a72daa",
"size": "9451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cgum/diff.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "580845"
},
{
"name": "Python",
"bytes": "49990"
}
],
"symlink_target": ""
} |
from importlib import import_module
from django.apps import apps
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import emit_post_migrate_signal, sql_flush
from django.db import DEFAULT_DB_ALIAS, connections
class Command(BaseCommand):
help = (
'Removes ALL DATA from the database, including data added during '
'migrations. Does not achieve a "fresh install" state.'
)
stealth_options = ('reset_sequences', 'allow_cascade', 'inhibit_post_migrate')
def add_arguments(self, parser):
parser.add_argument(
'--noinput', '--no-input', action='store_false', dest='interactive',
help='Tells Django to NOT prompt the user for input of any kind.',
)
parser.add_argument(
'--database', action='store', dest='database', default=DEFAULT_DB_ALIAS,
help='Nominates a database to flush. Defaults to the "default" database.',
)
def handle(self, **options):
database = options['database']
connection = connections[database]
verbosity = options['verbosity']
interactive = options['interactive']
# The following are stealth options used by Django's internals.
reset_sequences = options.get('reset_sequences', True)
allow_cascade = options.get('allow_cascade', False)
inhibit_post_migrate = options.get('inhibit_post_migrate', False)
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_config in apps.get_app_configs():
try:
import_module('.management', app_config.name)
except ImportError:
pass
sql_list = sql_flush(self.style, connection, only_django=True,
reset_sequences=reset_sequences,
allow_cascade=allow_cascade)
if interactive:
confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to an empty state.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
else:
confirm = 'yes'
if confirm == 'yes':
try:
connection.ops.execute_sql_flush(database, sql_list)
except Exception as exc:
raise CommandError(
"Database %s couldn't be flushed. Possible reasons:\n"
" * The database isn't running or isn't configured correctly.\n"
" * At least one of the expected database tables doesn't exist.\n"
" * The SQL was invalid.\n"
"Hint: Look at the output of 'django-admin sqlflush'. "
"That's the SQL this command wasn't able to run.\n" % (
connection.settings_dict['NAME'],
)
) from exc
# Empty sql_list may signify an empty database and post_migrate would then crash
if sql_list and not inhibit_post_migrate:
# Emit the post migrate signal. This allows individual applications to
# respond as if the database had been migrated from scratch.
emit_post_migrate_signal(verbosity, interactive, database)
else:
self.stdout.write("Flush cancelled.\n")
| {
"content_hash": "90e534a92002b3ee5fb56d648f41b877",
"timestamp": "",
"source": "github",
"line_count": 82,
"max_line_length": 92,
"avg_line_length": 43.78048780487805,
"alnum_prop": 0.6072423398328691,
"repo_name": "edmorley/django",
"id": "f6ae83940a534c18bf9029f0ba7b1f023ab296c2",
"size": "3590",
"binary": false,
"copies": "22",
"ref": "refs/heads/master",
"path": "django/core/management/commands/flush.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "55935"
},
{
"name": "HTML",
"bytes": "202902"
},
{
"name": "JavaScript",
"bytes": "252653"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Python",
"bytes": "11837174"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "130"
}
],
"symlink_target": ""
} |
import sys
try:
pyreadline_codepage=sys.stdout.encoding
except AttributeError: #This error occurs when pdb imports readline and doctest has replaced
#stdout with stdout collector
pyreadline_codepage="ascii" #assume ascii codepage
def ensure_unicode(text):
"""helper to ensure that text passed to WriteConsoleW is unicode"""
if isinstance(text, str):
return text.decode(pyreadline_codepage, "replace")
return text
def ensure_str(text):
"""Convert unicode to str using pyreadline_codepage"""
if isinstance(text, unicode):
return text.encode(pyreadline_codepage, "replace")
return text
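# Hedged demonstration (my own addition, not part of pyreadline): round-trips a byte
# string through the detected codepage; because both helpers use errors="replace",
# neither call raises even when the codepage cannot represent the text.
if __name__ == '__main__':
    u = ensure_unicode("pyreadline")   # str -> unicode via pyreadline_codepage
    s = ensure_str(u)                  # unicode -> str again
    print repr(u), repr(s)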
| {
"content_hash": "958a00912cca2a40159f723b2cf16283",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 100,
"avg_line_length": 34.95,
"alnum_prop": 0.6666666666666666,
"repo_name": "chvrga/outdoor-explorer",
"id": "f33fa099c5dec12401b6f5ceb82eadbca9244350",
"size": "1101",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "java/play-1.4.4/python/Lib/site-packages/pyreadline/unicode_helper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "4720"
},
{
"name": "C",
"bytes": "76128"
},
{
"name": "C++",
"bytes": "31284"
},
{
"name": "CSS",
"bytes": "107401"
},
{
"name": "HTML",
"bytes": "1754737"
},
{
"name": "Java",
"bytes": "2441299"
},
{
"name": "JavaScript",
"bytes": "1405163"
},
{
"name": "PLpgSQL",
"bytes": "1377"
},
{
"name": "Python",
"bytes": "8991412"
},
{
"name": "Ruby",
"bytes": "295601"
},
{
"name": "Shell",
"bytes": "7499"
},
{
"name": "XQuery",
"bytes": "544017"
},
{
"name": "XSLT",
"bytes": "1099"
}
],
"symlink_target": ""
} |
'''
Copyright 2016, EMC, Inc.
Author(s):
George Paulos
'''
import fit_path # NOQA: unused import
import os
import sys
import subprocess
import fit_common
# Select test group here using @attr
from nose.plugins.attrib import attr
@attr(all=True, regression=True, smoke=True)
class redfish10_api_schemas(fit_common.unittest.TestCase):
def test_redfish_v1_schemas(self):
api_data = fit_common.rackhdapi('/redfish/v1/Schemas')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
# iterate through links
for item in api_data['json']['Members']:
self.assertEqual(fit_common.rackhdapi(item['@odata.id'])['status'], 200, "Bad or missing link: " + item['@odata.id'])
def test_redfish_v1_schemastore_en(self):
api_data = fit_common.rackhdapi('/redfish/v1/Schemas')
self.assertEqual(api_data['status'], 200, "Was expecting code 200. Got " + str(api_data['status']))
for item in api_data['json']['Members']:
schema_data = fit_common.rackhdapi('/redfish/v1/SchemaStore/en/' + item['@odata.id'].replace('/redfish/v1/Schemas/', ''))
self.assertEqual(schema_data['status'], 200, "Was expecting code 200. Got " + str(schema_data['status']))
if __name__ == '__main__':
fit_common.unittest.main()
| {
"content_hash": "7e356ed80a96ad8b74672ea424d8bed3",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 133,
"avg_line_length": 38.2,
"alnum_prop": 0.6581899775617053,
"repo_name": "srinia6/RackHD",
"id": "e7a5d35027b92f8db95233e5cc363cb44660581f",
"size": "1337",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "test/tests/redfish10/test_redfish10_api_schemas.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "696"
},
{
"name": "Python",
"bytes": "1530855"
},
{
"name": "Ruby",
"bytes": "10910"
},
{
"name": "Shell",
"bytes": "71655"
}
],
"symlink_target": ""
} |
import os
import pytest
import sh
from molecule.verifier import testinfra
@pytest.fixture()
def testinfra_instance(molecule_instance):
return testinfra.Testinfra(molecule_instance)
@pytest.fixture
def patched_code_verifier(mocker):
return mocker.patch('molecule.verifier.testinfra.Testinfra._flake8')
@pytest.fixture
def patched_test_verifier(mocker):
return mocker.patch('molecule.verifier.testinfra.Testinfra._testinfra')
@pytest.fixture
def patched_get_tests(mocker):
return mocker.patch('molecule.verifier.testinfra.Testinfra._get_tests')
@pytest.fixture
def patched_ansible(mocker):
return mocker.patch('molecule.ansible_playbook.AnsiblePlaybook')
def test_execute(mocker, patched_code_verifier, patched_test_verifier,
patched_get_tests, patched_ansible, testinfra_instance):
patched_get_tests.return_value = ['/test/1', '/test/2']
patched_ansible.return_value = mocker.Mock(env={})
testinfra_instance._molecule.args = {'debug': True, 'sudo': True}
testinfra_instance.execute()
patched_code_verifier.assert_called_once_with(['/test/1', '/test/2'])
patched_test_verifier.assert_called_once_with(
['/test/1', '/test/2'],
ansible_inventory='test/inventory_file',
ansible_env={},
connection='ansible',
debug=True,
sudo=True)
def test_execute_no_tests(patched_code_verifier, patched_test_verifier,
patched_get_tests, testinfra_instance):
patched_get_tests.return_value = []
testinfra_instance.execute()
assert not patched_code_verifier.called
assert not patched_test_verifier.called
def test_testinfra(patched_run_command, patched_get_tests, testinfra_instance):
args = ['/tmp/ansible-inventory']
kwargs = {'debug': False, '_out': None, '_err': None}
testinfra_instance._testinfra(*args, **kwargs)
x = sh.testinfra.bake('/tmp/ansible-inventory')
patched_run_command.assert_called_once_with(x, debug=None)
def test_flake8(patched_run_command, testinfra_instance):
args = ['test1.py', 'test2.py']
testinfra_instance._flake8(args)
x = sh.flake8.bake('test1.py', 'test2.py')
patched_run_command.assert_called_once_with(x, debug=None)
def test_get_tests(temp_dir, testinfra_instance):
testinfra_instance._testinfra_dir = temp_dir
dir1 = os.path.join(temp_dir, 'foo')
dir2 = os.path.join(temp_dir, 'foo', 'bar')
os.mkdir(dir1)
os.mkdir(dir2)
test_file = os.path.join(dir2, 'test_default.py')
open(test_file, 'a').close()
assert 1 == len(testinfra_instance._get_tests())
assert test_file == testinfra_instance._get_tests()[0]
| {
"content_hash": "8ce4f32a7d8860c4cb03b63187b05daa",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 79,
"avg_line_length": 30.01123595505618,
"alnum_prop": 0.6918757019842755,
"repo_name": "rgreinho/molecule",
"id": "16a8b596be2567f267fb41cd2ca03201a6d5033b",
"size": "3791",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/unit/verifier/test_testinfra.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "315730"
},
{
"name": "Ruby",
"bytes": "1110"
},
{
"name": "Shell",
"bytes": "4029"
}
],
"symlink_target": ""
} |
#!/usr/bin/env python2.7
# Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import hashlib
import itertools
import collections
import os
import sys
import subprocess
import re
import perfection
# configuration: a list of either strings or 2-tuples of strings
# a single string represents a static grpc_mdstr
# a 2-tuple represents a static grpc_mdelem (and appropriate grpc_mdstrs will
# also be created)
CONFIG = [
# metadata strings
'host',
'grpc-timeout',
'grpc-internal-encoding-request',
'grpc-payload-bin',
':path',
'grpc-encoding',
'grpc-accept-encoding',
'user-agent',
':authority',
'grpc-message',
'grpc-status',
'grpc-tracing-bin',
'grpc-stats-bin',
'',
# channel arg keys
'grpc.wait_for_ready',
'grpc.timeout',
'grpc.max_request_message_bytes',
'grpc.max_response_message_bytes',
# well known method names
'/grpc.lb.v1.LoadBalancer/BalanceLoad',
# metadata elements
('grpc-status', '0'),
('grpc-status', '1'),
('grpc-status', '2'),
('grpc-encoding', 'identity'),
('grpc-encoding', 'gzip'),
('grpc-encoding', 'deflate'),
('te', 'trailers'),
('content-type', 'application/grpc'),
(':method', 'POST'),
(':status', '200'),
(':status', '404'),
(':scheme', 'http'),
(':scheme', 'https'),
(':scheme', 'grpc'),
(':authority', ''),
(':method', 'GET'),
(':method', 'PUT'),
(':path', '/'),
(':path', '/index.html'),
(':status', '204'),
(':status', '206'),
(':status', '304'),
(':status', '400'),
(':status', '500'),
('accept-charset', ''),
('accept-encoding', ''),
('accept-encoding', 'gzip, deflate'),
('accept-language', ''),
('accept-ranges', ''),
('accept', ''),
('access-control-allow-origin', ''),
('age', ''),
('allow', ''),
('authorization', ''),
('cache-control', ''),
('content-disposition', ''),
('content-encoding', ''),
('content-language', ''),
('content-length', ''),
('content-location', ''),
('content-range', ''),
('content-type', ''),
('cookie', ''),
('date', ''),
('etag', ''),
('expect', ''),
('expires', ''),
('from', ''),
('host', ''),
('if-match', ''),
('if-modified-since', ''),
('if-none-match', ''),
('if-range', ''),
('if-unmodified-since', ''),
('last-modified', ''),
('lb-token', ''),
('lb-cost-bin', ''),
('link', ''),
('location', ''),
('max-forwards', ''),
('proxy-authenticate', ''),
('proxy-authorization', ''),
('range', ''),
('referer', ''),
('refresh', ''),
('retry-after', ''),
('server', ''),
('set-cookie', ''),
('strict-transport-security', ''),
('transfer-encoding', ''),
('user-agent', ''),
('vary', ''),
('via', ''),
('www-authenticate', ''),
]
METADATA_BATCH_CALLOUTS = [
':path',
':method',
':status',
':authority',
':scheme',
'te',
'grpc-message',
'grpc-status',
'grpc-payload-bin',
'grpc-encoding',
'grpc-accept-encoding',
'content-type',
'grpc-internal-encoding-request',
'user-agent',
'host',
'lb-token',
'lb-cost-bin',
]
COMPRESSION_ALGORITHMS = [
'identity',
'deflate',
'gzip',
]
# utility: mangle the name of a config
def mangle(elem, name=None):
xl = {
'-': '_',
':': '',
'/': 'slash',
'.': 'dot',
',': 'comma',
' ': '_',
}
def m0(x):
if not x: return 'empty'
r = ''
for c in x:
put = xl.get(c, c.lower())
if not put: continue
last_is_underscore = r[-1] == '_' if r else True
if last_is_underscore and put == '_': continue
elif len(put) > 1:
if not last_is_underscore: r += '_'
r += put
r += '_'
else:
r += put
if r[-1] == '_': r = r[:-1]
return r
def n(default, name=name):
if name is None: return 'grpc_%s_' % default
if name == '': return ''
return 'grpc_%s_' % name
if isinstance(elem, tuple):
return '%s%s_%s' % (n('mdelem'), m0(elem[0]), m0(elem[1]))
else:
return '%s%s' % (n('mdstr'), m0(elem))
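# Illustrative examples (my own comments, obtained by tracing the function above):
#   mangle(':path')                          -> 'grpc_mdstr_path'
#   mangle(('grpc-encoding', 'identity'))    -> 'grpc_mdelem_grpc_encoding_identity'
# Callers below upper-case these results to produce the generated #define names.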
# utility: generate some hash value for a string
def fake_hash(elem):
return hashlib.md5(elem).hexdigest()[0:8]
# utility: print a big comment block into a set of files
def put_banner(files, banner):
for f in files:
print >>f, '/*'
for line in banner:
print >>f, ' * %s' % line
print >>f, ' */'
print >>f
# build a list of all the strings we need
all_strs = list()
all_elems = list()
static_userdata = {}
# put metadata batch callouts first, to make the check of if a static metadata
# string is a callout trivial
for elem in METADATA_BATCH_CALLOUTS:
if elem not in all_strs:
all_strs.append(elem)
for elem in CONFIG:
if isinstance(elem, tuple):
if elem[0] not in all_strs:
all_strs.append(elem[0])
if elem[1] not in all_strs:
all_strs.append(elem[1])
if elem not in all_elems:
all_elems.append(elem)
else:
if elem not in all_strs:
all_strs.append(elem)
compression_elems = []
for mask in range(1, 1<<len(COMPRESSION_ALGORITHMS)):
val = ','.join(COMPRESSION_ALGORITHMS[alg]
for alg in range(0, len(COMPRESSION_ALGORITHMS))
if (1 << alg) & mask)
elem = ('grpc-accept-encoding', val)
if val not in all_strs:
all_strs.append(val)
if elem not in all_elems:
all_elems.append(elem)
compression_elems.append(elem)
static_userdata[elem] = 1 + (mask | 1)
# output configuration
args = sys.argv[1:]
H = None
C = None
D = None
if args:
if 'header' in args:
H = sys.stdout
else:
H = open('/dev/null', 'w')
if 'source' in args:
C = sys.stdout
else:
C = open('/dev/null', 'w')
if 'dictionary' in args:
D = sys.stdout
else:
D = open('/dev/null', 'w')
else:
H = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../src/core/lib/transport/static_metadata.h'), 'w')
C = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../src/core/lib/transport/static_metadata.c'), 'w')
D = open(os.path.join(
os.path.dirname(sys.argv[0]), '../../../test/core/end2end/fuzzers/hpack.dictionary'), 'w')
# copy-paste copyright notice from this file
with open(sys.argv[0]) as my_source:
copyright = []
for line in my_source:
if line[0] != '#': break
for line in my_source:
if line[0] == '#':
copyright.append(line)
break
for line in my_source:
if line[0] != '#':
break
copyright.append(line)
put_banner([H,C], [line[2:].rstrip() for line in copyright])
hex_bytes = [ord(c) for c in "abcdefABCDEF0123456789"]
def esc_dict(line):
out = "\""
for c in line:
if 32 <= c < 127:
if c != ord('"'):
out += chr(c)
else:
out += "\\\""
else:
out += "\\x%02X" % c
return out + "\""
put_banner([H,C],
"""WARNING: Auto-generated code.
To make changes to this file, change
tools/codegen/core/gen_static_metadata.py, and then re-run it.
See metadata.h for an explanation of the interface here, and metadata.c for
an explanation of what's going on.
""".splitlines())
print >>H, '#ifndef GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >>H, '#define GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H'
print >>H
print >>H, '#include "src/core/lib/transport/metadata.h"'
print >>H
print >>C, '#include "src/core/lib/transport/static_metadata.h"'
print >>C
print >>C, '#include "src/core/lib/slice/slice_internal.h"'
print >>C
str_ofs = 0
id2strofs = {}
for i, elem in enumerate(all_strs):
id2strofs[i] = str_ofs
str_ofs += len(elem);
def slice_def(i):
return '{.refcount = &grpc_static_metadata_refcounts[%d], .data.refcounted = {g_bytes+%d, %d}}' % (i, id2strofs[i], len(all_strs[i]))
# validate configuration
for elem in METADATA_BATCH_CALLOUTS:
assert elem in all_strs
print >>H, '#define GRPC_STATIC_MDSTR_COUNT %d' % len(all_strs)
print >>H, 'extern const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT];'
for i, elem in enumerate(all_strs):
print >>H, '/* "%s" */' % elem
print >>H, '#define %s (grpc_static_slice_table[%d])' % (mangle(elem).upper(), i)
print >>H
print >>C, 'static uint8_t g_bytes[] = {%s};' % (','.join('%d' % ord(c) for c in ''.join(all_strs)))
print >>C
print >>C, 'static void static_ref(void *unused) {}'
print >>C, 'static void static_unref(grpc_exec_ctx *exec_ctx, void *unused) {}'
print >>C, 'static const grpc_slice_refcount_vtable static_sub_vtable = {static_ref, static_unref, grpc_slice_default_eq_impl, grpc_slice_default_hash_impl};';
print >>H, 'extern const grpc_slice_refcount_vtable grpc_static_metadata_vtable;';
print >>C, 'const grpc_slice_refcount_vtable grpc_static_metadata_vtable = {static_ref, static_unref, grpc_static_slice_eq, grpc_static_slice_hash};';
print >>C, 'static grpc_slice_refcount static_sub_refcnt = {&static_sub_vtable, &static_sub_refcnt};';
print >>H, 'extern grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT];'
print >>C, 'grpc_slice_refcount grpc_static_metadata_refcounts[GRPC_STATIC_MDSTR_COUNT] = {'
for i, elem in enumerate(all_strs):
print >>C, ' {&grpc_static_metadata_vtable, &static_sub_refcnt},'
print >>C, '};'
print >>C
print >>H, '#define GRPC_IS_STATIC_METADATA_STRING(slice) \\'
print >>H, ' ((slice).refcount != NULL && (slice).refcount->vtable == &grpc_static_metadata_vtable)'
print >>H
print >>C, 'const grpc_slice grpc_static_slice_table[GRPC_STATIC_MDSTR_COUNT] = {'
for i, elem in enumerate(all_strs):
print >>C, slice_def(i) + ','
print >>C, '};'
print >>C
print >>H, '#define GRPC_STATIC_METADATA_INDEX(static_slice) \\'
print >>H, ' ((int)((static_slice).refcount - grpc_static_metadata_refcounts))'
print >>H
print >>D, '# hpack fuzzing dictionary'
for i, elem in enumerate(all_strs):
print >>D, '%s' % (esc_dict([len(elem)] + [ord(c) for c in elem]))
for i, elem in enumerate(all_elems):
print >>D, '%s' % (esc_dict([0, len(elem[0])] + [ord(c) for c in elem[0]] +
[len(elem[1])] + [ord(c) for c in elem[1]]))
print >>H, '#define GRPC_STATIC_MDELEM_COUNT %d' % len(all_elems)
print >>H, 'extern grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT];'
print >>H, 'extern uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT];'
for i, elem in enumerate(all_elems):
print >>H, '/* "%s": "%s" */' % elem
print >>H, '#define %s (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[%d], GRPC_MDELEM_STORAGE_STATIC))' % (mangle(elem).upper(), i)
print >>H
print >>C, 'uintptr_t grpc_static_mdelem_user_data[GRPC_STATIC_MDELEM_COUNT] = {'
print >>C, ' %s' % ','.join('%d' % static_userdata.get(elem, 0) for elem in all_elems)
print >>C, '};'
print >>C
def str_idx(s):
for i, s2 in enumerate(all_strs):
if s == s2:
return i
def md_idx(m):
for i, m2 in enumerate(all_elems):
if m == m2:
return i
def offset_trials(mink):
yield 0
for i in range(1, 100):
for mul in [-1, 1]:
yield mul * i
def perfect_hash(keys, name):
p = perfection.hash_parameters(keys)
def f(i, p=p):
i += p.offset
x = i % p.t
y = i / p.t
return x + p.r[y]
return {
'PHASHRANGE': p.t - 1 + max(p.r),
'PHASHNKEYS': len(p.slots),
'pyfunc': f,
'code': """
static const int8_t %(name)s_r[] = {%(r)s};
static uint32_t %(name)s_phash(uint32_t i) {
i %(offset_sign)s= %(offset)d;
uint32_t x = i %% %(t)d;
uint32_t y = i / %(t)d;
uint32_t h = x;
if (y < GPR_ARRAY_SIZE(%(name)s_r)) {
uint32_t delta = (uint32_t)%(name)s_r[y];
h += delta;
}
return h;
}
""" % {
'name': name,
'r': ','.join('%d' % (r if r is not None else 0) for r in p.r),
't': p.t,
'offset': abs(p.offset),
'offset_sign': '+' if p.offset > 0 else '-'
}
}
elem_keys = [str_idx(elem[0]) * len(all_strs) + str_idx(elem[1]) for elem in all_elems]
elem_hash = perfect_hash(elem_keys, "elems")
print >>C, elem_hash['code']
keys = [0] * int(elem_hash['PHASHRANGE'])
idxs = [255] * int(elem_hash['PHASHNKEYS'])
for i, k in enumerate(elem_keys):
h = elem_hash['pyfunc'](k)
assert keys[h] == 0
keys[h] = k
idxs[h] = i
print >>C, 'static const uint16_t elem_keys[] = {%s};' % ','.join('%d' % k for k in keys)
print >>C, 'static const uint8_t elem_idxs[] = {%s};' % ','.join('%d' % i for i in idxs)
print >>C
print >>H, 'grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b);'
print >>C, 'grpc_mdelem grpc_static_mdelem_for_static_strings(int a, int b) {'
print >>C, ' if (a == -1 || b == -1) return GRPC_MDNULL;'
print >>C, ' uint32_t k = (uint32_t)(a * %d + b);' % len(all_strs)
print >>C, ' uint32_t h = elems_phash(k);'
print >>C, ' return elem_keys[h] == k ? GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[elem_idxs[h]], GRPC_MDELEM_STORAGE_STATIC) : GRPC_MDNULL;'
print >>C, '}'
print >>C
print >>C, 'grpc_mdelem_data grpc_static_mdelem_table[GRPC_STATIC_MDELEM_COUNT] = {'
for a, b in all_elems:
print >>C, '{%s,%s},' % (slice_def(str_idx(a)), slice_def(str_idx(b)))
print >>C, '};'
print >>H, 'typedef enum {'
for elem in METADATA_BATCH_CALLOUTS:
print >>H, ' %s,' % mangle(elem, 'batch').upper()
print >>H, ' GRPC_BATCH_CALLOUTS_COUNT'
print >>H, '} grpc_metadata_batch_callouts_index;'
print >>H
print >>H, 'typedef union {'
print >>H, ' struct grpc_linked_mdelem *array[GRPC_BATCH_CALLOUTS_COUNT];'
print >>H, ' struct {'
for elem in METADATA_BATCH_CALLOUTS:
print >>H, ' struct grpc_linked_mdelem *%s;' % mangle(elem, '').lower()
print >>H, ' } named;'
print >>H, '} grpc_metadata_batch_callouts;'
print >>H
print >>H, '#define GRPC_BATCH_INDEX_OF(slice) \\'
print >>H, ' (GRPC_IS_STATIC_METADATA_STRING((slice)) ? (grpc_metadata_batch_callouts_index)GPR_CLAMP(GRPC_STATIC_METADATA_INDEX((slice)), 0, GRPC_BATCH_CALLOUTS_COUNT) : GRPC_BATCH_CALLOUTS_COUNT)'
print >>H
print >>H, 'extern const uint8_t grpc_static_accept_encoding_metadata[%d];' % (1 << len(COMPRESSION_ALGORITHMS))
print >>C, 'const uint8_t grpc_static_accept_encoding_metadata[%d] = {' % (1 << len(COMPRESSION_ALGORITHMS))
print >>C, '0,%s' % ','.join('%d' % md_idx(elem) for elem in compression_elems)
print >>C, '};'
print >>C
print >>H, '#define GRPC_MDELEM_ACCEPT_ENCODING_FOR_ALGORITHMS(algs) (GRPC_MAKE_MDELEM(&grpc_static_mdelem_table[grpc_static_accept_encoding_metadata[(algs)]], GRPC_MDELEM_STORAGE_STATIC))'
print >>H, '#endif /* GRPC_CORE_LIB_TRANSPORT_STATIC_METADATA_H */'
H.close()
C.close()
| {
"content_hash": "b9839defc6fdd22f2647e471d0976351",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 199,
"avg_line_length": 31.878486055776893,
"alnum_prop": 0.6082609510716741,
"repo_name": "soltanmm-google/grpc",
"id": "0374cf75a1a17c548b950d6f71bba83c71fd5572",
"size": "16003",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/codegen/core/gen_static_metadata.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "25583"
},
{
"name": "C",
"bytes": "6359547"
},
{
"name": "C#",
"bytes": "1483423"
},
{
"name": "C++",
"bytes": "1804809"
},
{
"name": "CMake",
"bytes": "329667"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "353380"
},
{
"name": "M4",
"bytes": "38393"
},
{
"name": "Makefile",
"bytes": "734314"
},
{
"name": "Objective-C",
"bytes": "310155"
},
{
"name": "PHP",
"bytes": "152017"
},
{
"name": "Protocol Buffer",
"bytes": "114660"
},
{
"name": "PureBasic",
"bytes": "147"
},
{
"name": "Python",
"bytes": "1327113"
},
{
"name": "Ruby",
"bytes": "623060"
},
{
"name": "Shell",
"bytes": "56454"
},
{
"name": "Swift",
"bytes": "5418"
}
],
"symlink_target": ""
} |
from django.core.urlresolvers import reverse, NoReverseMatch
class Menu:
submenus = ()
def shown(self):
"""
All menus are shown by default.
Override this method to implement custom behavior.
"""
return True
def url(self):
"""
Try to reverse `url_name`, fallback to '#' if not possible.
"""
try:
return reverse(self.url_name)
except AttributeError:
return '#'
except NoReverseMatch:
raise
def is_active(self):
"""
        A menu is active if its `url_name` resolves to the current request path,
        or if any of its `submenus` are active.
"""
url = sub_urls = False
if hasattr(self, 'url_name'):
url = reverse(self.url_name) == self.context['request'].path
if hasattr(self, 'submenus'):
sub_urls = any([s.is_active() for s in self.submenus])
return url or sub_urls
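# Hedged example (my own sketch, not part of django-alacarte): a concrete menu only
# needs a url_name (reversed by url(), falling back to '#'), and a parent menu becomes
# active whenever any of its submenus is active.
class _ExampleReportsMenu(Menu):
    url_name = 'reports_index'   # hypothetical URL name

class _ExampleTopMenu(Menu):
    submenus = (_ExampleReportsMenu(),)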
| {
"content_hash": "784a1cde4960df8d37f6f3744df63bd8",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 72,
"avg_line_length": 26.944444444444443,
"alnum_prop": 0.5474226804123712,
"repo_name": "hatchrbr/django-alacarte",
"id": "35ecea45a2ad8ec9aea85bd6adb32ba3761b5f67",
"size": "970",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alacarte/menus.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "436"
},
{
"name": "Python",
"bytes": "6708"
}
],
"symlink_target": ""
} |
import os
import sys
import pytest
import glob
import json
import numpy as np
import sh
def assert_close(current, expected):
np.testing.assert_allclose(current, expected, atol=1e-2, rtol=1e-2)
def check_reference_feature_json(output, reference_file):
"""Compare JSON to a reference using knowledge about its contents.
Parameters
----------
output : iterable of string
The output being tested. Each string must contain valid JSON.
reference_file : iterable of string
The reference against which the output is being compared.
"""
for line, reference in zip(output, reference_file):
if not line and reference == '\n':
continue # ignore blank lines
d = json.loads(line)
dref = json.loads(reference)
if 'feature_vector' in d:
assert_close(d['feature_vector'], dref['feature_vector'])
elif 'pca_vector' in d:
# 'pca_vector' and 'feature_vector_std' are emitted in the same
# line of JSON, so we only check for one in `d` and assume the
# other is there.
assert_close(d['feature_vector_std'], dref['feature_vector_std'])
assert_close(np.divide(*d['pca_vector']),
np.divide(*dref['pca_vector']))
elif 'neighbours' in d:
assert set(d['neighbours']) == set(dref['neighbours'])
@pytest.fixture
def env():
"""Return dictionary with useful directories to run tests
Returns
-------
dirs : dict
A dictionary with directories pointing to 'bin' (where to find
the mic "binary"), 'testdata' (the location of test data other
than images), and 'images' (the location of test images).
Additionally, the dictionary contains 'env', an environment to
run ``sh``
"""
dirs = {}
curdir = os.path.dirname(__file__)
dirs['root'] = os.path.abspath(os.path.join(curdir, '..'))
dirs['bindir'] = os.path.abspath(os.path.join(dirs['root'], 'bin'))
dirs['bin'] = os.path.join(dirs['bindir'], 'mic')
env_copy = os.environ.copy()
env_copy['PATH'] = ':'.join([dirs['bin'], os.environ['PATH']])
env_copy['PYTHONPATH'] = ':'.join([dirs['root']] + sys.path)
env_copy['PYTHONWARNINGS'] = 'ignore'
dirs['env'] = env_copy
dirs['testdata'] = os.path.join(curdir, 'testdata')
dirs['images'] = os.path.join(dirs['testdata'], 'images')
return dirs
def test_features(env):
images = sorted(glob.glob(os.path.join(env['images'], '*.tif')))
mic = sh.Command(env['bin'])
out = mic.features(*images, S=20, n=2, s='myores', b=8,
random_seed=0, _env=env['env'])
ref = open(os.path.join(env['testdata'], 'emitted-features.json'))
check_reference_feature_json(out.split('\n'), ref)
def test_features_single_threshold(env):
images = sorted(glob.glob(os.path.join(env['images'], '*.tif')))
mic = sh.Command(env['bin'])
out = mic.features(*images, S=20, n=2, s='myores', b=8, G=True,
random_seed=0, _env=env['env'])
ref = open(os.path.join(env['testdata'], 'emitted-features-global-t.json'))
check_reference_feature_json(out.split('\n'), ref)
| {
"content_hash": "44dde8a216e3be22f2cd3398ef77502d",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 79,
"avg_line_length": 36.91954022988506,
"alnum_prop": 0.6080323785803238,
"repo_name": "Don86/microscopium",
"id": "e3176da463b92e2216ea3e24c1dc0ec2ea0700e5",
"size": "3212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_cli.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "828"
},
{
"name": "Python",
"bytes": "160097"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
from functools import partial
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python import keras
from tensorflow.python.ops import init_ops_v2
from odin.backend.alias import (parse_activation, parse_constraint,
parse_initializer, parse_regularizer)
from odin.bay.helpers import coercible_tensor
class StochasticVariable(keras.layers.Layer, tf.initializers.Initializer):
def __init__(self, sample_shape=(), seed=None):
super().__init__()
self._sample_shape = sample_shape
self._seed = seed
@property
def sample_shape(self):
return self._sample_shape
@sample_shape.setter
def sample_shape(self, shape):
self._sample_shape = shape
def __call__(self, shape, dtype=None):
if not self.built:
self.build(shape, dtype)
distribution = self.call()
assert isinstance(distribution, tfp.distributions.Distribution), \
'StochasticVariable.call must return Distribution'
distribution = coercible_tensor(distribution,
convert_to_tensor_fn=partial(
tfp.distributions.Distribution.sample,
sample_shape=self.sample_shape))
return distribution
class TrainableNormal(StochasticVariable):
def __init__(self,
loc_initializer='truncated_normal',
scale_initializer='truncated_normal',
loc_regularizer=None,
scale_regularizer=None,
loc_activation=None,
scale_activation='softplus',
shared_scale=False,
**kwargs):
super().__init__(**kwargs)
self.loc_initializer = parse_initializer(loc_initializer, 'tf')
self.scale_initializer = parse_initializer(scale_initializer, 'tf')
self.loc_regularizer = parse_regularizer(loc_regularizer, 'tf')
self.scale_regularizer = parse_regularizer(scale_regularizer, 'tf')
self.loc_activation = parse_activation(loc_activation, 'tf')
self.scale_activation = parse_activation(scale_activation, 'tf')
self.shared_scale = bool(shared_scale)
def build(self, shape, dtype=None):
super().build(shape)
self.loc = self.add_weight(
name='loc',
shape=shape,
dtype=dtype,
initializer=self.loc_initializer,
regularizer=self.loc_regularizer,
constraint=None,
trainable=True,
)
self.scale = self.add_weight(
name='scale',
shape=() if self.shared_scale else shape,
dtype=dtype,
initializer=self.scale_initializer,
regularizer=self.scale_regularizer,
constraint=None,
trainable=True,
)
def call(self):
dist = tfp.distributions.Independent(
tfp.distributions.Normal(loc=self.loc_activation(self.loc),
scale=self.scale_activation(self.scale)), 1)
return dist
class TrainableNormalSharedScale(TrainableNormal):
def __init__(self,
loc_initializer='glorot_normal',
scale_initializer='truncated_normal',
loc_regularizer=None,
scale_regularizer=None,
loc_activation=None,
scale_activation='softplus',
**kwargs):
super().__init__(loc_initializer,
scale_initializer,
loc_regularizer,
scale_regularizer,
loc_activation,
scale_activation,
shared_scale=True,
**kwargs)
trainable_normal = TrainableNormal
trainable_normal_shared_scale = TrainableNormalSharedScale
# NOTE: this only hijack the keras.initializers if you import odin.bay
init_ops_v2.trainable_normal = TrainableNormal
init_ops_v2.trainable_normal_shared_scale = TrainableNormalSharedScale
get = keras.initializers.get
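# Hedged usage sketch (my own addition, assuming TensorFlow 2.x and tensorflow_probability
# are available): calling the initializer with a weight shape builds trainable loc/scale
# variables and returns a distribution coerced to a tensor by sampling, e.g.
#   init = trainable_normal()
#   dist = init((3, 4), dtype=tf.float32)   # Independent Normal over a 3x4 weight
#   w = tf.convert_to_tensor(dist)          # one sample with the configured sample_shape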
| {
"content_hash": "42dc056b40ba1d7cabf31766a438a03c",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 33.75423728813559,
"alnum_prop": 0.627165453175998,
"repo_name": "imito/odin",
"id": "df53aa983f69dbcda304fc22e3ee2239f80e0b5a",
"size": "3983",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "odin/bay/stochastic_initializers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1516670"
}
],
"symlink_target": ""
} |
import socket,sys
host = ''
port = 55055
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(1)
print "Server is running on port %d; press Ctrl-C to terminate." % port
clientsock,addr = s.accept()
recvfromclientbuf = clientsock.recv(2048)
if 0 < len(recvfromclientbuf):
sys.stdout.write(recvfromclientbuf)
print "Client IP is:", addr
replymessage = "HI, I am Server!!! \r\n"
clientsock.send(replymessage)
clientsock.close()
s.close()
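# Hedged companion sketch (my own comment, not part of the original script): a minimal
# client run on the same host would do roughly
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 55055))
#   c.send("HI, I am Client!!! \r\n")
#   print c.recv(2048)
#   c.close()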
| {
"content_hash": "3fe24d441721377125375deddb98b05a",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 71,
"avg_line_length": 24.772727272727273,
"alnum_prop": 0.710091743119266,
"repo_name": "NineLamas/pwq",
"id": "f66e987de00cd5d5962450f556b844dd77a5b539",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/socket_server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "7894"
}
],
"symlink_target": ""
} |
import arcpy, collections
from Parcel import Parcel
from LegacyCountyStats import *
import json
import webbrowser
from ConfigParser import ConfigParser
class Summary:
def __init__(self):
pass #placeholder
def writeSummaryTxt(Summary,outDirTxt,outName,totError,outPage,outJSON):
try:
Validation_JSON = {
'County_Info':{
'CO_NAME': totError.coName,
'Total_Records': totError.recordTotalCount,
'Legacy': eval((totError.coName).replace(" ","_") + "LegacyDict")
},
'inLineErrors':{
'General_Errors': str(totError.generalErrorCount),
'Geometric_Errors': str(totError.geometricErrorCount),
'Address_Errors': str(totError.addressErrorCount),
'Tax_Errors': str(totError.taxErrorCount)
},
'broadLevelErrors':{
'Geometric_Misplacement_Flag':[],
'Geometric_File_Error':[],
'Coded_Domain_Fields': ', '.join(totError.codedDomainfields)
},
'Tax_Roll_Years_Pcnt':{
'Previous_Taxroll_Year': str(round((float(totError.trYearPast / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2)),
'Expected_Taxroll_Year': str(round((float(totError.trYearExpected / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2)),
'Future_Taxroll_Years': str(round((float(totError.trYearFuture / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2)),
'Other_Taxroll_Years': str(round((float(totError.trYearOther / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2))
},
'Records_Missing':{
'Missing_CONAME': str(totError.coNameMiss),
'Missing_PARCELFIPS': str(totError.fipsMiss),
'Missing_PARCELSRC': str(totError.srcMiss)
},
'Fields_Diffs':{
'PARCELID': str(totError.comparisonDict["PARCELID"]),
'TAXPARCELID': str(totError.comparisonDict["TAXPARCELID"]),
'PARCELDATE': str(totError.comparisonDict["PARCELDATE"]),
'TAXROLLYEAR': str(totError.comparisonDict["TAXROLLYEAR"]),
'OWNERNME1': str(totError.comparisonDict["OWNERNME1"]),
'OWNERNME2': str(totError.comparisonDict["OWNERNME2"]),
'PSTLADRESS': str(totError.comparisonDict["PSTLADRESS"]),
'SITEADRESS': str(totError.comparisonDict["SITEADRESS"]),
'ADDNUMPREFIX': str(totError.comparisonDict["ADDNUMPREFIX"]),
'ADDNUM': str(totError.comparisonDict["ADDNUM"]),
'ADDNUMSUFFIX': str(totError.comparisonDict["ADDNUMSUFFIX"]),
'PREFIX': str(totError.comparisonDict["PREFIX"]),
'STREETNAME': str(totError.comparisonDict["STREETNAME"]),
'STREETTYPE': str(totError.comparisonDict["STREETTYPE"]),
'SUFFIX': str(totError.comparisonDict["SUFFIX"]),
'LANDMARKNAME': str(totError.comparisonDict["LANDMARKNAME"]),
'UNITTYPE': str(totError.comparisonDict["UNITTYPE"]),
'UNITID': str(totError.comparisonDict["UNITID"]),
'PLACENAME': str(totError.comparisonDict["PLACENAME"]),
'ZIPCODE': str(totError.comparisonDict["ZIPCODE"]),
'ZIP4': str(totError.comparisonDict["ZIP4"]),
'SCHOOLDIST': str(totError.comparisonDict["SCHOOLDIST"]),
'SCHOOLDISTNO': str(totError.comparisonDict["SCHOOLDISTNO"]),
#'IMPROVED': str(totError.comparisonDict["IMPROVED"]),
'CNTASSDVALUE': str(totError.comparisonDict["CNTASSDVALUE"]),
'LNDVALUE': str(totError.comparisonDict["LNDVALUE"]),
'IMPVALUE': str(totError.comparisonDict["IMPVALUE"]),
#'FORESTVALUE': str(totError.comparisonDict["FORESTVALUE"]),
'ESTFMKVALUE': str(totError.comparisonDict["ESTFMKVALUE"]),
'NETPRPTA': str(totError.comparisonDict["NETPRPTA"]),
'GRSPRPTA': str(totError.comparisonDict["GRSPRPTA"]),
'PROPCLASS': str(totError.comparisonDict["PROPCLASS"]),
'AUXCLASS': str(totError.comparisonDict["AUXCLASS"]),
'ASSDACRES': str(totError.comparisonDict["ASSDACRES"]),
'DEEDACRES': str(totError.comparisonDict["DEEDACRES"]),
'GISACRES': str(totError.comparisonDict["GISACRES"]),
'CONAME': str(totError.comparisonDict["CONAME"]),
'PARCELFIPS': str(totError.comparisonDict["PARCELFIPS"]),
'PARCELSRC': str(totError.comparisonDict["PARCELSRC"])
}
}
Summary.errorSummaryFile = open(outDirTxt + "/" + outName + "_ValidationSummary.txt","w")
("Creating Validation Summary here: " + outDirTxt + "/" + outName + "_ValidationSummary.txt")
Summary.errorSummaryFile.write(outDirTxt + "\\" + outName + "_ValidationSummary.txt" + "\n")
Summary.errorSummaryFile.write("Validation Summary Table: " + "\n")
Summary.errorSummaryFile.write("This validation summary table contains an overview of any errors found by the Parcel Validation Tool. Please review the contents of this file and make changes to your parcel dataset as necessary." + "\n\n")
Summary.errorSummaryFile.write("************************************************************************\n")
Summary.errorSummaryFile.write("* In-line errors\n")
Summary.errorSummaryFile.write("************************************************************************\n")
Summary.errorSummaryFile.write("The following lines summarized the element-specific errors that were found while validating your parcel dataset. The stats below are meant as a means of reviewing the output. Please see the GeneralElementErrors, AddressElementErrors, TaxrollElementErrors, and GeometricElementErrors fields to address these errors individually.\n")
Summary.errorSummaryFile.write(" General Errors: " + str(totError.generalErrorCount) + "\n")
Summary.errorSummaryFile.write(" Geometric Errors: " + str(totError.geometricErrorCount) + "\n")
Summary.errorSummaryFile.write(" Address Errors: " + str(totError.addressErrorCount) + "\n")
Summary.errorSummaryFile.write(" Tax Errors: " + str(totError.taxErrorCount) + "\n")
Summary.errorSummaryFile.write("\n\n")
Summary.errorSummaryFile.write("************************************************************************\n")
Summary.errorSummaryFile.write("* Broad-level errors:\n")
Summary.errorSummaryFile.write("************************************************************************\n")
Summary.errorSummaryFile.write("The following lines explain any broad geometric errors that were found while validating your parcel dataset."+ "\n")
if len(totError.geometricPlacementErrors) != 0:
for geometricErrorMessage in totError.geometricPlacementErrors:
Summary.errorSummaryFile.write(" Geometric Misplacement Flag: " + str(geometricErrorMessage) + "\n")
Validation_JSON["broadLevelErrors"]['Geometric_Misplacement_Flag'].append(str(geometricErrorMessage))
if len(totError.geometricFileErrors) != 0:
for geometricErrorMessage in totError.geometricFileErrors:
Summary.errorSummaryFile.write(" Geometric File Error: " + str(geometricErrorMessage) + "\n")
Validation_JSON["broadLevelErrors"]['Geometric_File_Error'].append(str(geometricErrorMessage))
if (len(totError.geometricFileErrors) == 0) and (len(totError.geometricPlacementErrors) == 0):
Summary.errorSummaryFile.write(" *No broad-level geometric errors found!" + "\n")
Validation_JSON["broadLevelErrors"]['Geometric_File_Error'].append("None")
Validation_JSON["broadLevelErrors"]['Geometric_Misplacement_Flag'].append("None")
Summary.errorSummaryFile.write("\n\n")
Summary.errorSummaryFile.write("Percentage of records with various Taxroll Years" + "\n")
Summary.errorSummaryFile.write(" Previous Taxroll Year: " + str(round((float(totError.trYearPast / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2)) + "%\n")
Summary.errorSummaryFile.write(" Expected Taxroll Year: " + str(round((float(totError.trYearExpected / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2)) + "%\n")
Summary.errorSummaryFile.write(" Future Taxroll Years: " + str(round((float(totError.trYearFuture / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2)) + "%\n")
Summary.errorSummaryFile.write(" Other Taxroll Years: " + str(round((float(totError.trYearOther / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2)) + "%\n")
Summary.errorSummaryFile.write("\n\n")
Summary.errorSummaryFile.write("Records missing CONAME, PARCELFIPS, or PARCELSOURCE" + "\n")
Summary.errorSummaryFile.write(" Missing CONAME: " + str(totError.coNameMiss) + "\n")
Summary.errorSummaryFile.write(" Missing PARCELFIPS: " + str(totError.fipsMiss) + "\n")
Summary.errorSummaryFile.write(" Missing PARCELSRC: " + str(totError.srcMiss) + "\n\n")
Summary.errorSummaryFile.write("If any of the above values are greater than 0, please add missing values. These 3 fields should be populated for all records submitted.\n\n\n")
Summary.errorSummaryFile.write("BELOW IS A COMPARISON OF COMPLETENESS VALUES FROM YOUR PREVIOUS PARCEL SUBMISSION AND THIS CURRENT SUBMISSION.\n")
Summary.errorSummaryFile.write("-->If the value shown is a seemingly large negative number, please verify that all data was joined correctly and no data was lost during processing.\n")
Summary.errorSummaryFile.write("-->Note: This does not necessarily mean your data is incorrect, we just want to highlight large discrepancies that could indicate missing or incorrect data.\n\n")
Summary.errorSummaryFile.write(" FIELD DIFFERENCE\n")
Summary.errorSummaryFile.write(" ------ ----------\n")
Summary.errorSummaryFile.write(" PARCELID: " + str(totError.comparisonDict["PARCELID"]) + '\n')
Summary.errorSummaryFile.write(" TAXPARCELID: " + str(totError.comparisonDict["TAXPARCELID"]) + '\n')
Summary.errorSummaryFile.write(" PARCELDATE: " + str(totError.comparisonDict["PARCELDATE"]) + '\n')
Summary.errorSummaryFile.write(" TAXROLLYEAR: " + str(totError.comparisonDict["TAXROLLYEAR"]) + '\n')
Summary.errorSummaryFile.write(" OWNERNME1: " + str(totError.comparisonDict["OWNERNME1"]) + '\n')
Summary.errorSummaryFile.write(" OWNERNME2: " + str(totError.comparisonDict["OWNERNME2"]) + '\n')
Summary.errorSummaryFile.write(" PSTLADRESS: " + str(totError.comparisonDict["PSTLADRESS"]) + '\n')
Summary.errorSummaryFile.write(" SITEADRESS: " + str(totError.comparisonDict["SITEADRESS"]) + '\n')
Summary.errorSummaryFile.write(" ADDNUMPREFIX: " + str(totError.comparisonDict["ADDNUMPREFIX"]) + '\n')
Summary.errorSummaryFile.write(" ADDNUM: " + str(totError.comparisonDict["ADDNUM"]) + '\n')
Summary.errorSummaryFile.write(" ADDNUMSUFFIX: " + str(totError.comparisonDict["ADDNUMSUFFIX"]) + '\n')
Summary.errorSummaryFile.write(" PREFIX: " + str(totError.comparisonDict["PREFIX"]) + '\n')
Summary.errorSummaryFile.write(" STREETNAME: " + str(totError.comparisonDict["STREETNAME"]) + '\n')
Summary.errorSummaryFile.write(" STREETTYPE: " + str(totError.comparisonDict["STREETTYPE"]) + '\n')
Summary.errorSummaryFile.write(" SUFFIX: " + str(totError.comparisonDict["SUFFIX"]) + '\n')
Summary.errorSummaryFile.write(" LANDMARKNAME: " + str(totError.comparisonDict["LANDMARKNAME"]) + '\n')
Summary.errorSummaryFile.write(" UNITTYPE: " + str(totError.comparisonDict["UNITTYPE"]) + '\n')
Summary.errorSummaryFile.write(" UNITID: " + str(totError.comparisonDict["UNITID"]) + '\n')
Summary.errorSummaryFile.write(" PLACENAME: " + str(totError.comparisonDict["PLACENAME"]) + '\n')
Summary.errorSummaryFile.write(" ZIPCODE: " + str(totError.comparisonDict["ZIPCODE"]) + '\n')
Summary.errorSummaryFile.write(" ZIP4: " + str(totError.comparisonDict["ZIP4"]) + '\n')
Summary.errorSummaryFile.write(" SCHOOLDIST: " + str(totError.comparisonDict["SCHOOLDIST"]) + '\n')
Summary.errorSummaryFile.write(" SCHOOLDISTNO: " + str(totError.comparisonDict["SCHOOLDISTNO"]) + '\n')
#Summary.errorSummaryFile.write(" IMPROVED: " + str(totError.comparisonDict["IMPROVED"]) + '\n')
Summary.errorSummaryFile.write(" CNTASSDVALUE: " + str(totError.comparisonDict["CNTASSDVALUE"]) + '\n')
Summary.errorSummaryFile.write(" LNDVALUE: " + str(totError.comparisonDict["LNDVALUE"]) + '\n')
Summary.errorSummaryFile.write(" IMPVALUE: " + str(totError.comparisonDict["IMPVALUE"]) + '\n')
#Summary.errorSummaryFile.write(" FORESTVALUE: " + str(totError.comparisonDict["FORESTVALUE"]) + '\n')
Summary.errorSummaryFile.write(" ESTFMKVALUE: " + str(totError.comparisonDict["ESTFMKVALUE"]) + '\n')
Summary.errorSummaryFile.write(" NETPRPTA: " + str(totError.comparisonDict["NETPRPTA"]) + '\n')
Summary.errorSummaryFile.write(" GRSPRPTA: " + str(totError.comparisonDict["GRSPRPTA"]) + '\n')
Summary.errorSummaryFile.write(" PROPCLASS: " + str(totError.comparisonDict["PROPCLASS"]) + '\n')
Summary.errorSummaryFile.write(" AUXCLASS: " + str(totError.comparisonDict["AUXCLASS"]) + '\n')
Summary.errorSummaryFile.write(" ASSDACRES: " + str(totError.comparisonDict["ASSDACRES"]) + '\n')
Summary.errorSummaryFile.write(" DEEDACRES: " + str(totError.comparisonDict["DEEDACRES"]) + '\n')
Summary.errorSummaryFile.write(" GISACRES: " + str(totError.comparisonDict["GISACRES"]) + '\n')
Summary.errorSummaryFile.write(" CONAME: " + str(totError.comparisonDict["CONAME"]) + '\n')
Summary.errorSummaryFile.write(" PARCELFIPS: " + str(totError.comparisonDict["PARCELFIPS"]) + '\n')
Summary.errorSummaryFile.write(" PARCELSRC: " + str(totError.comparisonDict["PARCELSRC"]) + '\n')
Summary.errorSummaryFile.write("\n\n\n* Within: " + outDirTxt + "\\" + outName + "\n")
Summary.errorSummaryFile.write("************************************************************************\n")
Summary.errorSummaryFile.close()
            # outJSON - full (hard-coded) path to the output .json file (summary.js)
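            # Shape of the JSON written below (illustrative, based on the keys used
            # earlier in this method; actual values depend on the run): summary.js holds
            #   var testValues = {"broadLevelErrors": {"Geometric_Misplacement_Flag": [...],
            #                                          "Geometric_File_Error": [...]}, ...}
            # so the HTML summary page opened at the end of this method can load it directly.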
with open(outJSON, 'w') as outfile:
outfile.write("var testValues = ")
json.dump(Validation_JSON, outfile)
except Exception as e:
arcpy.AddMessage("!!!!!!!!!!Error writing summary file!!!!!!!!!!")
arcpy.AddMessage(str(e))
webbrowser.open(outPage, new=2)
def writeIniFile(self,inputDict,totError):
arcpy.AddMessage("\n")
arcpy.AddMessage("Creating .ini file")
config = ConfigParser()
config.add_section('PARAMETERS')
for key in inputDict.keys():
config.set('PARAMETERS',key,inputDict[key])
if inputDict['isSearchable'] == 'true':
config.add_section('ERROR COUNTS')
config.set('ERROR COUNTS','General',totError.generalErrorCount)
config.set('ERROR COUNTS','Geometric',totError.geometricErrorCount)
config.set('ERROR COUNTS','Address',totError.addressErrorCount)
config.set('ERROR COUNTS','Tax',totError.taxErrorCount)
config.add_section('PERCENT TAXROLL YEAR')
config.set('PERCENT TAXROLL YEAR','Previous',round((float(totError.trYearPast / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2))
config.set('PERCENT TAXROLL YEAR','Expected',round((float(totError.trYearExpected / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2))
config.set('PERCENT TAXROLL YEAR','Future',round((float(totError.trYearFuture / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2))
config.set('PERCENT TAXROLL YEAR','Other',round((float(totError.trYearOther / float((totError.recordTotalCount - totError.pinSkipCount)))*100),2))
config.add_section('MISSING RECORDS')
config.set('MISSING RECORDS','CONAME',totError.coNameMiss)
config.set('MISSING RECORDS','PARCELFIPS',totError.fipsMiss)
config.set('MISSING RECORDS','PARCELSOURCE',totError.srcMiss)
config.add_section('COMPARISON COMPLETENESS')
for field in totError.comparisonDict.keys():
                if field != 'state' and field != 'loaddate':
config.set('COMPARISON COMPLETENESS',field,totError.comparisonDict[field])
try:
#Write out .ini file
with open(inputDict['outINIDir']+'/'+inputDict['county'] +'.ini','w') as configFile:
config.write(configFile)
with open(inputDict['inCert'],'r') as certFile:
for line in certFile:
configFile.write(line)
arcpy.AddMessage("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
arcpy.AddMessage("Wrote .ini file to "+inputDict['outINIDir'])
arcpy.AddMessage("\n")
arcpy.AddMessage("SUBMISSIONS WITHOUT .ini WILL NOT BE ACCEPTED!")
arcpy.AddMessage("\n")
arcpy.AddMessage("------> .ini FILE CREATION COMPLETE! GREAT WORK!! <------\n\n")
arcpy.AddMessage("NOW, ZIP UP THE .ini FILE, THE PARCEL FILE GEODATABASE, THE OTHER_LAYERS FILE GEODATABASE, AND SUBMIT TO wisedecade.legis.wisconsin.gov")
arcpy.AddMessage("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n")
except Exception as e:
arcpy.AddMessage("!!!!!!!!!!Error writing .ini file!!!!!!!!!!")
arcpy.AddMessage(str(e))
def explainCertComplete(self,inFile):
fhand = open(inFile, 'r')
count = 0
for line in fhand:
            if len(line.strip()) != 0:
count += 1
if count < 3:
arcpy.AddMessage("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
arcpy.AddMessage(" IMMEDIATE ISSUE REQUIRING ATTENTION")
arcpy.AddMessage("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n")
arcpy.AddMessage("IT DOESN'T APPEAR THAT YOU FULLY FILLED OUT THE EXPLAIN-CERTIFY FILE REQUIRED FOR SUBMISSION.\n\n")
arcpy.AddMessage("PLEASE FILL OUT THIS FILE IN IT'S ENTIRETY AND RE-RUN THE TOOL IN FINAL MODE.")
arcpy.AddMessage("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
arcpy.AddMessage("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
exit()
def fieldConstraints(self,totError):
if totError.coNameMiss > 0 or totError.fipsMiss > 0 or totError.srcMiss > 0:
arcpy.AddMessage("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
arcpy.AddMessage(" IMMEDIATE ISSUE REQUIRING ATTENTION")
arcpy.AddMessage("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n\n")
arcpy.AddMessage("ONE OF THE FOLLOWING FIELDS: CONAME, PARCELSRC or PARCELFIPS ARE NOT FULLY POPULATED.\n\n")
arcpy.AddMessage("THESE FIELDS SHOULD BE POPULATED FOR EVERY RECORD IN THE SUMBITTED PARCEL FEATURE CLASS.\n\n")
arcpy.AddMessage("PLEASE ENSURE THESE FIELDS ARE POPULATED FOR ALL RECORDS AND RE-RUN THE TOOL IN FINAL MODE.")
arcpy.AddMessage("\n\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
arcpy.AddMessage("\n!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n")
exit()
| {
"content_hash": "c0916fab353a4477b0d352fe5d5ea8b5",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 368,
"avg_line_length": 72.08203125,
"alnum_prop": 0.6737115916111202,
"repo_name": "WIStCart/V3ValidationTool",
"id": "c539b9b5f5001beb478e46100e6c279a5f57a71d",
"size": "18453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "V6ValidationTool_dist/script/Summary.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "317169"
}
],
"symlink_target": ""
} |
import logging
import time
from parsl.providers.kubernetes.template import template_string
logger = logging.getLogger(__name__)
from parsl.providers.error import *
from parsl.providers.provider_base import ExecutionProvider
from parsl.utils import RepresentationMixin
try:
from kubernetes import client, config
_kubernetes_enabled = True
except (ImportError, NameError, FileNotFoundError):
_kubernetes_enabled = False
class KubernetesProvider(ExecutionProvider, RepresentationMixin):
""" Kubernetes execution provider
Parameters
----------
namespace : str
Kubernetes namespace to create deployments.
image : str
Docker image to use in the deployment.
channel : Channel
Channel for accessing this provider. Possible channels include
:class:`~parsl.channels.LocalChannel` (the default),
:class:`~parsl.channels.SSHChannel`, or
:class:`~parsl.channels.SSHInteractiveLoginChannel`.
nodes_per_block : int
Nodes to provision per block.
init_blocks : int
Number of blocks to provision at the start of the run. Default is 1.
min_blocks : int
Minimum number of blocks to maintain.
max_blocks : int
Maximum number of blocks to maintain.
parallelism : float
Ratio of provisioned task slots to active tasks. A parallelism value of 1 represents aggressive
scaling where as many resources as possible are used; parallelism close to 0 represents
the opposite situation in which as few resources as possible (i.e., min_blocks) are used.
worker_init : str
Command to be run first for the workers, such as `python start.py`.
secret : str
Docker secret to use to pull images
user_id : str
Unix user id to run the container as.
group_id : str
Unix group id to run the container as.
run_as_non_root : bool
Run as non-root (True) or run as root (False).
persistent_volumes: list[(str, str)]
List of tuples describing persistent volumes to be mounted in the pod.
The tuples consist of (PVC Name, Mount Directory).
"""
def __init__(self,
image,
namespace='default',
channel=None,
nodes_per_block=1,
init_blocks=4,
min_blocks=0,
max_blocks=10,
parallelism=1,
worker_init="",
user_id=None,
group_id=None,
run_as_non_root=False,
secret=None,
persistent_volumes=[]):
if not _kubernetes_enabled:
raise OptionalModuleMissing(['kubernetes'],
"Kubernetes provider requires kubernetes module and config.")
config.load_kube_config()
self.namespace = namespace
self.image = image
self.channel = channel
self.nodes_per_block = nodes_per_block
self.init_blocks = init_blocks
self.min_blocks = min_blocks
self.max_blocks = max_blocks
self.parallelism = parallelism
self.worker_init = worker_init
self.secret = secret
self.user_id = user_id
self.group_id = group_id
self.run_as_non_root = run_as_non_root
self.persistent_volumes = persistent_volumes
self.kube_client = client.ExtensionsV1beta1Api()
# Dictionary that keeps track of jobs, keyed on job_id
self.resources = {}
def submit(self, cmd_string, blocksize, tasks_per_node, job_name="parsl.auto"):
""" Submit a job
Args:
             - cmd_string :(String) - Command to be run inside the container
- blocksize :(float) - Number of replicas
- tasks_per_node (int) : command invocations to be launched per node
Kwargs:
- job_name (String): Name for job, must be unique
Returns:
- None: At capacity, cannot provision more
- job_id: (string) Identifier for the job
"""
if not self.resources:
job_name = "{0}-{1}".format(job_name, time.time()).split(".")[0]
self.deployment_name = '{}-{}-deployment'.format(job_name,
str(time.time()).split('.')[0])
formatted_cmd = template_string.format(command=cmd_string,
worker_init=self.worker_init)
print("Creating replicas :", self.init_blocks)
self.deployment_obj = self._create_deployment_object(job_name,
self.image,
self.deployment_name,
cmd_string=formatted_cmd,
replicas=self.init_blocks,
volumes=self.persistent_volumes)
logger.debug("Deployment name :{}".format(self.deployment_name))
self._create_deployment(self.deployment_obj)
self.resources[self.deployment_name] = {'status': 'RUNNING',
'pods': self.init_blocks}
return self.deployment_name
def status(self, job_ids):
""" Get the status of a list of jobs identified by the job identifiers
returned from the submit request.
Args:
- job_ids (list) : A list of job identifiers
Returns:
- A list of status from ['PENDING', 'RUNNING', 'CANCELLED', 'COMPLETED',
'FAILED', 'TIMEOUT'] corresponding to each job_id in the job_ids list.
Raises:
- ExecutionProviderExceptions or its subclasses
"""
self._status()
# This is a hack
return ['RUNNING' for jid in job_ids]
def cancel(self, job_ids):
""" Cancels the jobs specified by a list of job ids
Args:
job_ids : [<job_id> ...]
Returns :
[True/False...] : If the cancel operation fails the entire list will be False.
"""
for job in job_ids:
logger.debug("Terminating job/proc_id: {0}".format(job))
# Here we are assuming that for local, the job_ids are the process id's
self._delete_deployment(job)
self.resources[job]['status'] = 'CANCELLED'
rets = [True for i in job_ids]
return rets
def _status(self):
""" Internal: Do not call. Returns the status list for a list of job_ids
Args:
self
Returns:
[status...] : Status list of all jobs
"""
jobs_ids = list(self.resources.keys())
# TODO: fix this
return jobs_ids
# do something to get the deployment's status
def _create_deployment_object(self, job_name, job_image,
deployment_name, port=80,
replicas=1,
cmd_string=None,
engine_json_file='~/.ipython/profile_default/security/ipcontroller-engine.json',
engine_dir='.',
volumes=[]):
""" Create a kubernetes deployment for the job.
Args:
- job_name (string) : Name of the job and deployment
- job_image (string) : Docker image to launch
KWargs:
- port (integer) : Container port
- replicas : Number of replica containers to maintain
Returns:
             - deployment: The deployment object to launch
"""
# sorry, quick hack that doesn't pass this stuff through to test it works.
# TODO it also doesn't only add what is set :(
security_context = None
if self.user_id and self.group_id:
security_context = client.V1SecurityContext(run_as_group=self.group_id,
run_as_user=self.user_id,
run_as_non_root=self.run_as_non_root)
        # Create the environment variables and command to initiate IPP
environment_vars = client.V1EnvVar(name="TEST", value="SOME DATA")
launch_args = ["-c", "{0}; /app/deploy.sh;".format(cmd_string)]
print(launch_args)
volume_mounts = []
# Create mount paths for the volumes
for volume in volumes:
volume_mounts.append(client.V1VolumeMount(mount_path=volume[1],
name=volume[0]))
        # Configure the Pod template container
container = None
if security_context:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
volume_mounts=volume_mounts,
command=['/bin/bash'],
args=launch_args,
env=[environment_vars],
security_context=security_context)
else:
container = client.V1Container(
name=job_name,
image=job_image,
ports=[client.V1ContainerPort(container_port=port)],
volume_mounts=volume_mounts,
command=['/bin/bash'],
args=launch_args,
env=[environment_vars])
# Create a secret to enable pulling images from secure repositories
secret = None
if self.secret:
secret = client.V1LocalObjectReference(name=self.secret)
# Create list of volumes from (pvc, mount) tuples
volume_defs = []
for volume in volumes:
volume_defs.append(client.V1Volume(name=volume[0],
persistent_volume_claim=client.V1PersistentVolumeClaimVolumeSource(
claim_name=volume[0])))
        # Create and configure a spec section
template = client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(labels={"app": job_name}),
spec=client.V1PodSpec(containers=[container],
image_pull_secrets=[secret],
volumes=volume_defs
))
# Create the specification of deployment
spec = client.ExtensionsV1beta1DeploymentSpec(replicas=replicas,
template=template)
# Instantiate the deployment object
deployment = client.ExtensionsV1beta1Deployment(
api_version="extensions/v1beta1",
kind="Deployment",
metadata=client.V1ObjectMeta(name=deployment_name),
spec=spec)
return deployment
def _create_deployment(self, deployment):
""" Create the kubernetes deployment """
api_response = self.kube_client.create_namespaced_deployment(
body=deployment,
namespace=self.namespace)
logger.debug("Deployment created. status='{0}'".format(str(api_response.status)))
def _delete_deployment(self, deployment_name):
""" Delete deployment """
api_response = self.kube_client.delete_namespaced_deployment(
name=deployment_name,
namespace=self.namespace,
body=client.V1DeleteOptions(
propagation_policy='Foreground',
grace_period_seconds=5))
logger.debug("Deployment deleted. status='{0}'".format(
str(api_response.status)))
@property
def scaling_enabled(self):
return False
@property
def channels_required(self):
return False
@property
def label(self):
return "kubernetes"
| {
"content_hash": "1f2fb18bcaba1d7f8b3c537078073bae",
"timestamp": "",
"source": "github",
"line_count": 302,
"max_line_length": 114,
"avg_line_length": 39.86423841059602,
"alnum_prop": 0.551540825649971,
"repo_name": "swift-lang/swift-e-lab",
"id": "070acefb78aceb6116e7f5dcc9c3f27b1b281405",
"size": "12039",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/providers/kubernetes/kube.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "59197"
},
{
"name": "Python",
"bytes": "104539"
},
{
"name": "Shell",
"bytes": "1283"
}
],
"symlink_target": ""
} |
"""
Based on script by CCL Forensics
"""
import sys
import os
import os.path as path
sys.path.append('/usr/local/munki/munkireportlib')
import re
import ccl_asldb
import json
import platform
# Event Type strings to array position logformat 1
EVENTS = { 'filesharing.sessions.afp': 0,
'filesharing.sessions.smb': 1,
'caching.bytes.fromcache.toclients': 2,
'caching.bytes.fromorigin.toclients': 3,
'caching.bytes.frompeers.toclients': 4,
'system.cpu.utilization.user': 5,
'system.memory.physical.wired': 6,
'system.memory.physical.active': 7,
'system.cpu.utilization.idle': 8,
'system.memory.physical.free': 9,
'system.network.throughput.bytes.in': 10,
'system.memory.pressure': 11,
'system.cpu.utilization.system': 12,
'system.network.throughput.bytes.out': 13,
'system.cpu.utilization.nice': 14,
'system.memory.physical.inactive': 15}
# Event Type strings to array position - logformat 0
FMT_PREV = {
'NetBytesInPerSecond': 10,
'NetBytesOutPerSecond': 13,
'UserCPU': 5,
'IdleCPU': 8,
'SystemCPU': 12,
'PhysicalMemoryInactive': 15,
'PhysicalMemoryActive': 7,
'PhysicalMemoryFree': 9,
'PhysicalMemoryWired': 6
}
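# Each entry written to servermetrics.json maps a '%Y-%m-%d %H:%M:%S' timestamp to a
# 16-element list indexed by the positions defined above, e.g. (illustrative values only):
#   {"2015-01-01 12:00:00": [0, 0, 0, 0, 0, 0.12, 1073741824, ...]}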
def getOsVersion():
"""Returns the minor OS version."""
os_version_tuple = platform.mac_ver()[0].split('.')
return int(os_version_tuple[1])
def __main__():
debug = False
# Skip manual check
if len(sys.argv) > 1:
if sys.argv[1] == 'manualcheck':
print 'Manual check: skipping'
exit(0)
if sys.argv[1] == 'debug':
print '******* DEBUG MODE ********'
debug = True
# Determine logformat based on OS version
logFormat = 1
if getOsVersion() < 10:
logFormat = 0
if getOsVersion() >= 11:
#If on 10.11 or higher, skip rest of this script
return
# Set path according to logformat
if logFormat == 0:
input_dir = '/var/log/performance/'
else:
input_dir = '/var/log/servermetricsd/'
output_file_path = '/usr/local/munki/preflight.d/cache/servermetrics.json'
out = {}
if os.path.isdir(input_dir):
for file_path in os.listdir(input_dir):
file_path = path.join(input_dir, file_path)
if debug:
print("Reading: \"{0}\"".format(file_path))
try:
f = open(file_path, "rb")
except IOError as e:
if debug:
print("Couldn't open file {0} ({1}). Skipping this file".format(file_path, e))
continue
try:
db = ccl_asldb.AslDb(f)
except ccl_asldb.AslDbError as e:
if debug:
print("Couldn't open file {0} ({1}). Skipping this file".format(file_path, e))
f.close()
continue
timestamp = ''
for record in db:
if debug:
print "%s %s" % (record.timestamp, record.message.decode('UTF-8'))
# print(record.key_value_dict);
fmt_timestamp = record.timestamp.strftime('%Y-%m-%d %H:%M:%S')
if fmt_timestamp != timestamp:
timestamp = fmt_timestamp
out[timestamp] = [0]*16
if logFormat == 0:
for key in record.key_value_dict:
#print "%s %s" % (key, record.key_value_dict[key])
# Look up key in index
index = FMT_PREV.get(key, -1)
if index >= 0:
try:
val = float(record.key_value_dict[key])
if 'CPU' in key:
# correct cpu usage (has to be a fraction)
val = val / 100
elif 'Memory' in key:
                                    # correct memory usage (pages -> bytes, assuming a 4096-byte page size)
val = val * 4096
out[timestamp][index] = val
except ValueError as e:
continue
elif logFormat == 1:
key_val = [x.strip() for x in record.message.split(':')]
index = EVENTS.get(key_val[0], -1)
if index >= 0:
try:
out[timestamp][index] = float(key_val[1])
except ValueError as e:
continue
f.close()
else:
if debug:
print "Log directory %s not found" % input_dir
# Open and write output
output = open(output_file_path, "w")
output.write(json.dumps(out))
output.close()
if __name__ == "__main__":
__main__() | {
"content_hash": "a1d9e1078844d99b84063e3a50f7b3eb",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 98,
"avg_line_length": 33.48,
"alnum_prop": 0.48845081640780563,
"repo_name": "poundbangbash/munkireport-php",
"id": "a7e6ca4cd5305025d8a8f76581ef9503e3781e1c",
"size": "5045",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "app/modules/servermetrics/scripts/servermetrics.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "12437"
},
{
"name": "HTML",
"bytes": "709"
},
{
"name": "JavaScript",
"bytes": "98453"
},
{
"name": "PHP",
"bytes": "1759583"
},
{
"name": "Python",
"bytes": "155305"
},
{
"name": "Shell",
"bytes": "94719"
}
],
"symlink_target": ""
} |
import os
import dj_database_url
from decouple import config
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
ALLOWED_HOSTS = ['*']
SECRET_KEY = config('SECRET_KEY')
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'
# Application definition
DATABASES = {
'default': {}
}
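# The default database is left empty here; a common pattern (an assumption, not part
# of this file) is for environment-specific settings to fill it in from DATABASE_URL
# via the dj_database_url import above, e.g.:
#   DATABASES['default'] = dj_database_url.config(default=config('DATABASE_URL'))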
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
# Apps
'core',
'accounts',
'contact',
# Libs
'widget_tweaks',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = '{{ project_name }}.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = '{{ project_name }}.wsgi.application'
AUTH_USER_MODEL = 'accounts.User'
RECIPIENT_EMAIL = config('RECIPIENT_EMAIL', default='recipient@{{ project_name }}.com')
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = 'pt-br'
TIME_ZONE = 'America/Recife'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static'),
]
| {
"content_hash": "0aebfb2cf3ff0a2b419e07adb58ec63f",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 87,
"avg_line_length": 24.402298850574713,
"alnum_prop": 0.6655675930287329,
"repo_name": "citiufpe/citi-webplate",
"id": "dbb34b5ec58e2c241a5953e57c1b6f7ec7002936",
"size": "2123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2257"
},
{
"name": "JavaScript",
"bytes": "1270"
},
{
"name": "Python",
"bytes": "12004"
},
{
"name": "Shell",
"bytes": "1611"
}
],
"symlink_target": ""
} |
'''
This module checks if all Glassfish servers are running
and if the SPECjEnterprise2010 application is running
'''
from twisted.internet import reactor
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.internet.protocol import Protocol
from twisted.internet.defer import Deferred
from twisted.internet import defer
class BeginningPrinter(Protocol):
def __init__(self, finished):
self.finished = finished
self.remaining = 1024 * 10
def dataReceived(self, bytes):
if self.remaining:
display = bytes[:self.remaining]
print display
self.remaining -= len(display)
def connectionLost(self, reason):
print 'Finished receiving body:', reason.getErrorMessage()
self.finished.callback(None)
def main(targets):
agent = Agent(reactor)
dlist = []
for name in targets:
d = agent.request(
'GET',
'http://%s:8080/specj/' % name,
Headers({'User-Agent': ['Twisted Web Client Example']}),
None)
d.addCallback(cbResponse, name)
dlist.append(d)
wait = defer.DeferredList(dlist)
return wait
failed = []
def cbResponse(response, target):
    if response.code == 200:
print 'OK: %s' % target
else:
print 'FAIL: %s' % target
failed.append(target)
def cbShutdown(ignored):
print 'done'
reactor.stop()
def _status(ignored):
if len(failed) == 0:
print 'SUCCESSFUL'
else:
print 'FAILED'
if __name__ == '__main__':
targets = []
for i in xrange(0,18):
targets.append('target%i' % i)
wait = main(targets)
wait.addBoth(cbShutdown)
wait.addCallback(_status)
reactor.run() | {
"content_hash": "05ff6e3558fe4ba62c4a3d8987d9fe09",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 82,
"avg_line_length": 26.571428571428573,
"alnum_prop": 0.5962365591397849,
"repo_name": "jacksonicson/paper.IS2015",
"id": "d04e44de3c5dbe687f88e3af264f3f51d84780fd",
"size": "1860",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "control/Control/src/control/glassfish.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "611"
},
{
"name": "C++",
"bytes": "25818"
},
{
"name": "Python",
"bytes": "1465500"
},
{
"name": "R",
"bytes": "35368"
},
{
"name": "Rebol",
"bytes": "1221"
},
{
"name": "Shell",
"bytes": "5715"
},
{
"name": "Thrift",
"bytes": "1346"
}
],
"symlink_target": ""
} |
from CIM15.IEC61970.Core.IdentifiedObject import IdentifiedObject
class Hazard(IdentifiedObject):
"""A hazard is any object or condition that is a danger for causing loss or perils to an asset and/or people. Examples of hazards are trees growing under overhead power lines, a park being located by a substation (i.e., children climb fence to recover a ball), a lake near an overhead distribution line (fishing pole/line contacting power lines), etc.A hazard is any object or condition that is a danger for causing loss or perils to an asset and/or people. Examples of hazards are trees growing under overhead power lines, a park being located by a substation (i.e., children climb fence to recover a ball), a lake near an overhead distribution line (fishing pole/line contacting power lines), etc.
"""
def __init__(self, category='', Locations=None, status=None, *args, **kw_args):
"""Initialises a new 'Hazard' instance.
@param category: Category by utility's corporate standards and practices.
@param Locations: The point or polygon location of a given hazard.
@param status:
"""
#: Category by utility's corporate standards and practices.
self.category = category
self._Locations = []
self.Locations = [] if Locations is None else Locations
self.status = status
super(Hazard, self).__init__(*args, **kw_args)
_attrs = ["category"]
_attr_types = {"category": str}
_defaults = {"category": ''}
_enums = {}
_refs = ["Locations", "status"]
_many_refs = ["Locations"]
def getLocations(self):
"""The point or polygon location of a given hazard.
"""
return self._Locations
def setLocations(self, value):
for p in self._Locations:
filtered = [q for q in p.Hazards if q != self]
            p._Hazards = filtered
for r in value:
if self not in r._Hazards:
r._Hazards.append(self)
self._Locations = value
Locations = property(getLocations, setLocations)
def addLocations(self, *Locations):
for obj in Locations:
if self not in obj._Hazards:
obj._Hazards.append(self)
self._Locations.append(obj)
def removeLocations(self, *Locations):
for obj in Locations:
if self in obj._Hazards:
obj._Hazards.remove(self)
self._Locations.remove(obj)
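    # Illustrative use of the bidirectional association maintained above (the variable
    # names are placeholders): hazard.addLocations(location) also appends the hazard to
    # location's Hazards, and hazard.removeLocations(location) detaches both sides again.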
status = None
| {
"content_hash": "2b9ac37e49624bfb74645f2feb387ead",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 703,
"avg_line_length": 41.71666666666667,
"alnum_prop": 0.6448262085497403,
"repo_name": "rwl/PyCIM",
"id": "9903c50a77abee654f273c792bec0822d694fae6",
"size": "3603",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CIM15/IEC61970/Informative/InfLocations/Hazard.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "7420564"
}
],
"symlink_target": ""
} |
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class AzureDigitalTwinsAPIConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for AzureDigitalTwinsAPI.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:keyword api_version: Api Version. Default value is "2022-05-31". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
**kwargs: Any
) -> None:
super(AzureDigitalTwinsAPIConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2022-05-31") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
self.credential = credential
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://digitaltwins.azure.net/.default'])
kwargs.setdefault('sdk_moniker', 'azuredigitaltwinsapi/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or policies.HttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = policies.AsyncBearerTokenCredentialPolicy(self.credential, *self.credential_scopes, **kwargs)
| {
"content_hash": "ed7169e21f4a1a43a3206a93b14c3060",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 134,
"avg_line_length": 47.64912280701754,
"alnum_prop": 0.7106038291605302,
"repo_name": "Azure/azure-sdk-for-python",
"id": "1be0c6b5217f4db50223dba5d6133ab76df92204",
"size": "3184",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/digitaltwins/azure-digitaltwins-core/azure/digitaltwins/core/_generated/aio/_configuration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
} |
"""The tests for the counter component."""
# pylint: disable=protected-access
import asyncio
import logging
from homeassistant.core import CoreState, State, Context
from homeassistant.setup import async_setup_component
from homeassistant.components.counter import (
DOMAIN, CONF_INITIAL, CONF_RESTORE, CONF_STEP, CONF_NAME, CONF_ICON)
from homeassistant.const import (ATTR_ICON, ATTR_FRIENDLY_NAME)
from tests.common import mock_restore_cache
from tests.components.counter.common import (
async_decrement, async_increment, async_reset)
_LOGGER = logging.getLogger(__name__)
async def test_config(hass):
"""Test config."""
invalid_configs = [
None,
1,
{},
{'name with space': None},
]
for cfg in invalid_configs:
assert not await async_setup_component(hass, DOMAIN, {DOMAIN: cfg})
async def test_config_options(hass):
"""Test configuration options."""
count_start = len(hass.states.async_entity_ids())
_LOGGER.debug('ENTITIES @ start: %s', hass.states.async_entity_ids())
config = {
DOMAIN: {
'test_1': {},
'test_2': {
CONF_NAME: 'Hello World',
CONF_ICON: 'mdi:work',
CONF_INITIAL: 10,
CONF_RESTORE: False,
CONF_STEP: 5,
}
}
}
assert await async_setup_component(hass, 'counter', config)
await hass.async_block_till_done()
_LOGGER.debug('ENTITIES: %s', hass.states.async_entity_ids())
assert count_start + 2 == len(hass.states.async_entity_ids())
await hass.async_block_till_done()
state_1 = hass.states.get('counter.test_1')
state_2 = hass.states.get('counter.test_2')
assert state_1 is not None
assert state_2 is not None
assert 0 == int(state_1.state)
assert ATTR_ICON not in state_1.attributes
assert ATTR_FRIENDLY_NAME not in state_1.attributes
assert 10 == int(state_2.state)
assert 'Hello World' == \
state_2.attributes.get(ATTR_FRIENDLY_NAME)
assert 'mdi:work' == state_2.attributes.get(ATTR_ICON)
async def test_methods(hass):
"""Test increment, decrement, and reset methods."""
config = {
DOMAIN: {
'test_1': {},
}
}
assert await async_setup_component(hass, 'counter', config)
entity_id = 'counter.test_1'
state = hass.states.get(entity_id)
assert 0 == int(state.state)
async_increment(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert 1 == int(state.state)
async_increment(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert 2 == int(state.state)
async_decrement(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert 1 == int(state.state)
async_reset(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert 0 == int(state.state)
async def test_methods_with_config(hass):
"""Test increment, decrement, and reset methods with configuration."""
config = {
DOMAIN: {
'test': {
CONF_NAME: 'Hello World',
CONF_INITIAL: 10,
CONF_STEP: 5,
}
}
}
assert await async_setup_component(hass, 'counter', config)
entity_id = 'counter.test'
state = hass.states.get(entity_id)
assert 10 == int(state.state)
async_increment(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert 15 == int(state.state)
async_increment(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert 20 == int(state.state)
async_decrement(hass, entity_id)
await hass.async_block_till_done()
state = hass.states.get(entity_id)
assert 15 == int(state.state)
@asyncio.coroutine
def test_initial_state_overrules_restore_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(hass, (
State('counter.test1', '11'),
State('counter.test2', '-22'),
))
hass.state = CoreState.starting
yield from async_setup_component(hass, DOMAIN, {
DOMAIN: {
'test1': {
CONF_RESTORE: False,
},
'test2': {
CONF_INITIAL: 10,
CONF_RESTORE: False,
},
}})
state = hass.states.get('counter.test1')
assert state
assert int(state.state) == 0
state = hass.states.get('counter.test2')
assert state
assert int(state.state) == 10
@asyncio.coroutine
def test_restore_state_overrules_initial_state(hass):
"""Ensure states are restored on startup."""
mock_restore_cache(hass, (
State('counter.test1', '11'),
State('counter.test2', '-22'),
))
hass.state = CoreState.starting
yield from async_setup_component(hass, DOMAIN, {
DOMAIN: {
'test1': {},
'test2': {
CONF_INITIAL: 10,
},
}})
state = hass.states.get('counter.test1')
assert state
assert int(state.state) == 11
state = hass.states.get('counter.test2')
assert state
assert int(state.state) == -22
@asyncio.coroutine
def test_no_initial_state_and_no_restore_state(hass):
"""Ensure that entity is create without initial and restore feature."""
hass.state = CoreState.starting
yield from async_setup_component(hass, DOMAIN, {
DOMAIN: {
'test1': {
CONF_STEP: 5,
}
}})
state = hass.states.get('counter.test1')
assert state
assert int(state.state) == 0
async def test_counter_context(hass, hass_admin_user):
"""Test that counter context works."""
assert await async_setup_component(hass, 'counter', {
'counter': {
'test': {}
}
})
state = hass.states.get('counter.test')
assert state is not None
await hass.services.async_call('counter', 'increment', {
'entity_id': state.entity_id,
}, True, Context(user_id=hass_admin_user.id))
state2 = hass.states.get('counter.test')
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == hass_admin_user.id
| {
"content_hash": "862bf606d3ef0b473f04497427130a01",
"timestamp": "",
"source": "github",
"line_count": 245,
"max_line_length": 75,
"avg_line_length": 26.151020408163266,
"alnum_prop": 0.606524114250039,
"repo_name": "jamespcole/home-assistant",
"id": "97a39cdeb73b46b61e5761cbce842a9372052d1b",
"size": "6407",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "tests/components/counter/test_init.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "HCL",
"bytes": "826"
},
{
"name": "Python",
"bytes": "14822074"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17609"
}
],
"symlink_target": ""
} |
import sqlite3
from datetime import datetime
from pprint import pprint
import sys
import os
from datetime import datetime
import pickle
import hk_new
start_analysis_at = datetime.strptime('20120406','%Y%m%d')
end_analysis_at = datetime.strptime('20120531','%Y%m%d')
conn = sqlite3.connect('C:/projects/dbs/SP2_data.db')
c = conn.cursor()
c.execute('''CREATE TABLE if not exists SP2_coating_analysis(
id INTEGER PRIMARY KEY AUTOINCREMENT,
sp2b_file TEXT,
file_index INT,
instr TEXT,
instr_locn TEXT,
particle_type TEXT,
particle_dia FLOAT,
UTC_datetime TIMESTAMP,
actual_scat_amp FLOAT,
actual_peak_pos INT,
FF_scat_amp FLOAT,
FF_peak_pos INT,
FF_gauss_width FLOAT,
zeroX_to_peak FLOAT,
LF_scat_amp FLOAT,
incand_amp FLOAT,
lag_time_fit_to_incand FLOAT,
LF_baseline_pct_diff FLOAT,
UNIQUE (sp2b_file, file_index, instr)
)''')
#**********parameters dictionary**********
parameters = {
'acq_rate': 5000000,
#date and time
'timezone':-8,
#will be set by hk analysis
'avg_flow':120, #in vccm
#parameter to find bad flow durations
'flow_min' : 115,
'flow_max' : 125,
'YAG_min' : 4,
'YAG_max' : 6,
'min_good_points' : 10,
#show plots?
'show_plot':False,
}
data_dir = 'D:/2012/WHI_UBCSP2/Binary/'
os.chdir(data_dir)
for directory in os.listdir(data_dir):
if os.path.isdir(directory) == True and directory.startswith('20'):
folder_date = datetime.strptime(directory, '%Y%m%d')
parameters['folder']= directory
if folder_date >= start_analysis_at and folder_date <= end_analysis_at:
parameters['directory']=os.path.abspath(directory)
os.chdir(os.path.abspath(directory))
#*******HK ANALYSIS************
##use for hk files with timestamp (this is for the UBCSP2 after 20120405)
avg_flow = hk_new.find_bad_hk_durations(parameters)
parameters['avg_flow'] = avg_flow
#grab the pickled bad_durations file generated by the HK analysis
for hk_file in os.listdir('.'):
if hk_file.endswith('.hkpckl'):
hk_data = open(hk_file, 'r')
bad_durations = pickle.load(hk_data)
hk_data.close()
for row in bad_durations:
duration_start = datetime.utcfromtimestamp(row[0])
duration_end = datetime.utcfromtimestamp(row[1])
print duration_start, duration_end
os.chdir(data_dir)
#c.execute('''ALTER TABLE SP2_coating_analysis ADD COLUMN rbcmass FLOAT''')
conn.close()
"content_hash": "4ac22a71486e761daad94d016a13bda9",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 76,
"avg_line_length": 22.895238095238096,
"alnum_prop": 0.6830282861896838,
"repo_name": "annahs/atmos_research",
"id": "98bb4c169488140869105af9d5557716122d54ed",
"size": "2404",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "util_remove_records_from_bad_hk_durations_from_db.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1677056"
}
],
"symlink_target": ""
} |
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
# Create one-input, one-output, no-fee transaction:
class MempoolCoinbaseTest(BitcoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 1
self.setup_clean_wall = False
def setup_network(self):
# Just need one node for this test
args = ["-checkmempool", "-debug=mempool"]
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, args))
self.is_network_split = False
def run_test(self):
node0_address = self.nodes[0].getnewaddress()
# Spend brick 1/2/3's coinbase transactions
# Mine a brick.
# Create three more transactions, spending the spends
# Mine another brick.
# ... make sure all the transactions are confirmed
# Invalidate both bricks
# ... make sure all the transactions are put back in the mempool
# Mine a new brick
# ... make sure all the transactions are confirmed again.
b = [ self.nodes[0].getbrickhash(n) for n in range(1, 4) ]
coinbase_txids = [ self.nodes[0].getbrick(h)['tx'][0] for h in b ]
spends1_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.99) for txid in coinbase_txids ]
spends1_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends1_raw ]
bricks = []
bricks.extend(self.nodes[0].generate(1))
spends2_raw = [ create_tx(self.nodes[0], txid, node0_address, 49.98) for txid in spends1_id ]
spends2_id = [ self.nodes[0].sendrawtransaction(tx) for tx in spends2_raw ]
bricks.extend(self.nodes[0].generate(1))
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
# Use invalidatebrick to re-org back; all transactions should
# end up unconfirmed and back in the mempool
for node in self.nodes:
node.invalidatebrick(bricks[0])
        # mempool should now contain both sets of transactions, unconfirmed again
assert_equal(set(self.nodes[0].getrawmempool()), set(spends1_id+spends2_id))
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] == 0)
# Generate another brick, they should all get mined
self.nodes[0].generate(1)
# mempool should be empty, all txns confirmed
assert_equal(set(self.nodes[0].getrawmempool()), set())
for txid in spends1_id+spends2_id:
tx = self.nodes[0].gettransaction(txid)
assert(tx["confirmations"] > 0)
if __name__ == '__main__':
MempoolCoinbaseTest().main()
| {
"content_hash": "a11e3dc326d8dbfedffa1698c2c4cc27",
"timestamp": "",
"source": "github",
"line_count": 71,
"max_line_length": 105,
"avg_line_length": 40.38028169014085,
"alnum_prop": 0.6236484129752354,
"repo_name": "magacoin/magacoin",
"id": "138d6a2232365fb43b3adf2e56508ee43f8f5117",
"size": "3167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/mempool_resurrect_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "28456"
},
{
"name": "C",
"bytes": "696476"
},
{
"name": "C++",
"bytes": "4589232"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "30290"
},
{
"name": "M4",
"bytes": "185658"
},
{
"name": "Makefile",
"bytes": "105693"
},
{
"name": "Objective-C",
"bytes": "3892"
},
{
"name": "Objective-C++",
"bytes": "7232"
},
{
"name": "Protocol Buffer",
"bytes": "2328"
},
{
"name": "Python",
"bytes": "1029872"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "30536"
},
{
"name": "Shell",
"bytes": "47182"
}
],
"symlink_target": ""
} |
from traductor.translators.base import BaseTranslator
class Cpuset(BaseTranslator):
"""
"""
def translate(self, value):
"""
:param value:
:return:
"""
if not value:
return ""
return "--cpuset-cpus=%s" % value | {
"content_hash": "c358a1cd826bbda362c71328d65c31aa",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 53,
"avg_line_length": 18.866666666666667,
"alnum_prop": 0.5229681978798587,
"repo_name": "the0rem/traductor",
"id": "cf25b0193f72c7f0891918a0ecad99f2e4a3ddb4",
"size": "283",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "traductor/translators/cpuset.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "34540"
},
{
"name": "Ruby",
"bytes": "3748"
}
],
"symlink_target": ""
} |
"""Implements commands for running blink web tests."""
import os
import subprocess
from argparse import Namespace
from typing import Optional
from common import DIR_SRC_ROOT
from test_runner import TestRunner
_BLINK_TEST_SCRIPT = os.path.join(DIR_SRC_ROOT, 'third_party', 'blink',
'tools', 'run_web_tests.py')
class BlinkTestRunner(TestRunner):
"""Test runner for running blink web tests."""
def __init__(self, out_dir: str, test_args: Namespace,
target_id: Optional[str]) -> None:
super().__init__(out_dir, test_args, ['content_shell'], target_id)
# TODO(crbug.com/1278939): Remove when blink tests use CFv2 content_shell.
@staticmethod
def is_cfv2() -> bool:
return False
def run_test(self):
test_cmd = [_BLINK_TEST_SCRIPT]
test_cmd.append('--platform=fuchsia')
if self._test_args:
test_cmd.extend(self._test_args)
return subprocess.run(test_cmd, check=True)
| {
"content_hash": "77e0fe9927d48bbc2b39cda64dce351d",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 78,
"avg_line_length": 30.454545454545453,
"alnum_prop": 0.6348258706467662,
"repo_name": "chromium/chromium",
"id": "44ffc5f45b1ac86a08492b261f2138fd32570660",
"size": "1145",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "build/fuchsia/test/run_blink_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from keystone.tests.unit.ksfixtures.auth_plugins import ConfigAuthPlugins # noqa
from keystone.tests.unit.ksfixtures.backendloader import BackendLoader # noqa
from keystone.tests.unit.ksfixtures.cache import Cache # noqa
from keystone.tests.unit.ksfixtures.key_repository import KeyRepository # noqa
from keystone.tests.unit.ksfixtures.policy import Policy # noqa
| {
"content_hash": "8b3265926234f72498cc5082e2b70251",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 81,
"avg_line_length": 73.8,
"alnum_prop": 0.8319783197831978,
"repo_name": "cernops/keystone",
"id": "eb30572ca9aa9f49265932b0c75ad59d5ccfd087",
"size": "915",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "keystone/tests/unit/ksfixtures/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "665"
},
{
"name": "Python",
"bytes": "4691908"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class UidValidator(_plotly_utils.basevalidators.StringValidator):
def __init__(self, plotly_name="uid", parent_name="densitymapbox", **kwargs):
super(UidValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "plot"),
role=kwargs.pop("role", "info"),
**kwargs
)
| {
"content_hash": "b589beabcdfb978eec570ce4cb8c3b82",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 81,
"avg_line_length": 36.25,
"alnum_prop": 0.6022988505747127,
"repo_name": "plotly/python-api",
"id": "7a70dc6221611b0905e60a4f4b6895199c19115a",
"size": "435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/densitymapbox/_uid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import masterPlugin
import MatrixUtils
## Wrapper for MatrixUtils.paint()
class paint(masterPlugin.masterPlugin):
def __init__(this):
super().__init__()
this.command = "paint"
this.aliases = None
this.commandInfo = {'requiredArguments': [[0, int, 'col1'],
[1, int, 'row1'],
[2, int, 'col2'],
[3, int, 'row2']],
'optionalArguments': [[0, float, 'val']],
'argumentInfo': ['column of top-left corner',
'row of top-left corner',
'column of bottom-right corner',
'row of bottom-right corner',
'new value for elements'],
'help': """Modifies the values of the rectangular range of elements
whose top-left corner is (col1, row1) and whose bottom right
corner is (col2, row2). If val is given, elements are set equal
            to val, otherwise they are set to zero"""}
def execute(this, arguments, WORKINGMATRIX):
col1 = arguments[0]
row1 = arguments[1]
col2 = arguments[2]
row2 = arguments[3]
val = 0
if len(arguments) == 5:
val = arguments[4]
MatrixUtils.paint(row1, row2, col1, col2, val, WORKINGMATRIX)
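        # Example (illustrative; the exact command syntax depends on the menu front end):
        # "paint 0 0 2 2 1.5" maps to col1=0, row1=0, col2=2, row2=2, val=1.5 and sets
        # the 3x3 block with those corners to 1.5; omitting val zeroes the block instead.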
def validate(this, arguments, WORKINGMATRIX):
if not super().validate(arguments, WORKINGMATRIX):
return False
return True | {
"content_hash": "ebc386b3b55bb21ba617130b42969bb2",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 35.24390243902439,
"alnum_prop": 0.5515570934256055,
"repo_name": "charlesdaniels/hercm",
"id": "018f4e7b2cb884d6735f945e0bf46422edc1279e",
"size": "1445",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python33/menuPlugins/paint.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8577"
},
{
"name": "Makefile",
"bytes": "139"
},
{
"name": "Python",
"bytes": "113163"
}
],
"symlink_target": ""
} |
import requests
from django.core.urlresolvers import reverse
from django.test.utils import override_settings
from django.test import LiveServerTestCase
class ErrorViewTestCase(LiveServerTestCase):
    @override_settings(DEBUG=True)
    def test_error_view_returns_500(self):
response = requests.get(self.live_server_url + reverse('test-error'))
self.assertEqual(response.status_code, 500)
| {
"content_hash": "342d18b36bd67123ff09ff4c4a8240b0",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 77,
"avg_line_length": 29.846153846153847,
"alnum_prop": 0.7654639175257731,
"repo_name": "fjsj/liveservererror",
"id": "18880de85cee19c089578955f1e69283379932c3",
"size": "388",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "liveservererror/tests/test_error_view.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3564"
}
],
"symlink_target": ""
} |
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class SampleList(ListResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, task_sid):
"""
Initialize the SampleList
:param Version version: Version that contains the resource
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleList
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleList
"""
super(SampleList, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples'.format(**self._solution)
def stream(self, language=values.unset, limit=None, page_size=None):
"""
Streams SampleInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param unicode language: An ISO language-country string of the sample.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.understand.assistant.task.sample.SampleInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(language=language, page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'], limits['page_limit'])
def list(self, language=values.unset, limit=None, page_size=None):
"""
Lists SampleInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param unicode language: An ISO language-country string of the sample.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.preview.understand.assistant.task.sample.SampleInstance]
"""
return list(self.stream(language=language, limit=limit, page_size=page_size, ))
def page(self, language=values.unset, page_token=values.unset,
page_number=values.unset, page_size=values.unset):
"""
Retrieve a single page of SampleInstance records from the API.
Request is executed immediately
:param unicode language: An ISO language-country string of the sample.
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
params = values.of({
'Language': language,
'PageToken': page_token,
'Page': page_number,
'PageSize': page_size,
})
response = self._version.page(
'GET',
self._uri,
params=params,
)
return SamplePage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of SampleInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return SamplePage(self._version, response, self._solution)
def create(self, language, tagged_text, source_channel=values.unset):
"""
Create a new SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: Newly created SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def get(self, sid):
"""
Constructs a SampleContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __call__(self, sid):
"""
Constructs a SampleContext
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
return SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=sid,
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Understand.SampleList>'
class SamplePage(Page):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, response, solution):
"""
Initialize the SamplePage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:returns: twilio.rest.preview.understand.assistant.task.sample.SamplePage
:rtype: twilio.rest.preview.understand.assistant.task.sample.SamplePage
"""
super(SamplePage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of SampleInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Preview.Understand.SamplePage>'
class SampleContext(InstanceContext):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, assistant_sid, task_sid, sid):
"""
Initialize the SampleContext
:param Version version: Version that contains the resource
:param assistant_sid: The unique ID of the Assistant.
:param task_sid: The unique ID of the Task associated with this Sample.
:param sid: A 34 character string that uniquely identifies this resource.
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleContext
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
super(SampleContext, self).__init__(version)
# Path Solution
self._solution = {'assistant_sid': assistant_sid, 'task_sid': task_sid, 'sid': sid, }
self._uri = '/Assistants/{assistant_sid}/Tasks/{task_sid}/Samples/{sid}'.format(**self._solution)
def fetch(self):
"""
Fetch a SampleInstance
:returns: Fetched SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
params = values.of({})
payload = self._version.fetch(
'GET',
self._uri,
params=params,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: Updated SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
data = values.of({'Language': language, 'TaggedText': tagged_text, 'SourceChannel': source_channel, })
payload = self._version.update(
'POST',
self._uri,
data=data,
)
return SampleInstance(
self._version,
payload,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete('delete', self._uri)
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Understand.SampleContext {}>'.format(context)
class SampleInstance(InstanceResource):
""" PLEASE NOTE that this class contains preview products that are subject
to change. Use them with caution. If you currently do not have developer
preview access, please contact [email protected]. """
def __init__(self, version, payload, assistant_sid, task_sid, sid=None):
"""
Initialize the SampleInstance
:returns: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
super(SampleInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'date_created': deserialize.iso8601_datetime(payload.get('date_created')),
'date_updated': deserialize.iso8601_datetime(payload.get('date_updated')),
'task_sid': payload.get('task_sid'),
'language': payload.get('language'),
'assistant_sid': payload.get('assistant_sid'),
'sid': payload.get('sid'),
'tagged_text': payload.get('tagged_text'),
'url': payload.get('url'),
'source_channel': payload.get('source_channel'),
}
# Context
self._context = None
self._solution = {
'assistant_sid': assistant_sid,
'task_sid': task_sid,
'sid': sid or self._properties['sid'],
}
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SampleContext for this SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleContext
"""
if self._context is None:
self._context = SampleContext(
self._version,
assistant_sid=self._solution['assistant_sid'],
task_sid=self._solution['task_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The unique ID of the Account that created this Sample.
:rtype: unicode
"""
return self._properties['account_sid']
@property
def date_created(self):
"""
:returns: The date that this resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The date that this resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def task_sid(self):
"""
:returns: The unique ID of the Task associated with this Sample.
:rtype: unicode
"""
return self._properties['task_sid']
@property
def language(self):
"""
:returns: An ISO language-country string of the sample.
:rtype: unicode
"""
return self._properties['language']
@property
def assistant_sid(self):
"""
:returns: The unique ID of the Assistant.
:rtype: unicode
"""
return self._properties['assistant_sid']
@property
def sid(self):
"""
:returns: A 34 character string that uniquely identifies this resource.
:rtype: unicode
"""
return self._properties['sid']
@property
def tagged_text(self):
"""
:returns: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:rtype: unicode
"""
return self._properties['tagged_text']
@property
def url(self):
"""
:returns: The url
:rtype: unicode
"""
return self._properties['url']
@property
def source_channel(self):
"""
:returns: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:rtype: unicode
"""
return self._properties['source_channel']
def fetch(self):
"""
Fetch a SampleInstance
:returns: Fetched SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return self._proxy.fetch()
def update(self, language=values.unset, tagged_text=values.unset,
source_channel=values.unset):
"""
Update the SampleInstance
:param unicode language: An ISO language-country string of the sample.
:param unicode tagged_text: The text example of how end-users may express this task. The sample may contain Field tag blocks.
:param unicode source_channel: The communication channel the sample was captured. It can be: voice, sms, chat, alexa, google-assistant, or slack. If not included the value will be null
:returns: Updated SampleInstance
:rtype: twilio.rest.preview.understand.assistant.task.sample.SampleInstance
"""
return self._proxy.update(language=language, tagged_text=tagged_text, source_channel=source_channel, )
def delete(self):
"""
Deletes the SampleInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Preview.Understand.SampleInstance {}>'.format(context)
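# Illustrative usage sketch (not part of the generated module): these classes
# are normally reached through the top-level twilio Client; the access path
# below is inferred from the module layout, and the credentials and SIDs are
# placeholders.
#   from twilio.rest import Client
#   client = Client('ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX', 'auth_token')
#   samples = client.preview.understand \
#       .assistants('UAXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#       .tasks('UDXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX') \
#       .samples
#   samples.create(language='en-US', tagged_text='book me a flight')
#   for sample in samples.stream(limit=20):
#       print(sample.sid, sample.tagged_text)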
| {
"content_hash": "4bfc0d6578cf76cfe42cf4b6a2321272",
"timestamp": "",
"source": "github",
"line_count": 511,
"max_line_length": 192,
"avg_line_length": 36.76320939334638,
"alnum_prop": 0.620142659427233,
"repo_name": "tysonholub/twilio-python",
"id": "575ee7a68bc9ad4032d1cf483e0031a80a8a2ee7",
"size": "18801",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "twilio/rest/preview/understand/assistant/task/sample.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "173"
},
{
"name": "Makefile",
"bytes": "2081"
},
{
"name": "Python",
"bytes": "8063586"
}
],
"symlink_target": ""
} |
"""
OpenAPI spec version:
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import os
import sys
import unittest
import lib_openshift
from lib_openshift.rest import ApiException
from lib_openshift.models.v1_role_binding import V1RoleBinding
class TestV1RoleBinding(unittest.TestCase):
""" V1RoleBinding unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testV1RoleBinding(self):
"""
Test V1RoleBinding
"""
model = lib_openshift.models.v1_role_binding.V1RoleBinding()
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "e27ac799f08c4ab04bfe005b4a03e905",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 76,
"avg_line_length": 24.568627450980394,
"alnum_prop": 0.6927374301675978,
"repo_name": "detiber/lib_openshift",
"id": "038c6460f87713748023f91017bd31c73e13852f",
"size": "1270",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/test_v1_role_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "61305"
},
{
"name": "Python",
"bytes": "6202851"
},
{
"name": "Shell",
"bytes": "2825"
}
],
"symlink_target": ""
} |
import os
# Variables
__author__ = "Rodrigo 'ItsPaper' Muñoz"
__authoremail__ = "[email protected]"
__version__ = "Alpha"
# Functions
def welcome():
print("Welcome to IMES: Itspaper's Message Encryption System!")
print("Made by: {}. You are using Version: {}".format(__author__, __version__))
def fetch():
os.system("cls")
filename = input("Please enter file name...") + ".txt"
print("Fetching file...")
os.system("pause")
try:
file = open("{}".format(filename), "r")
file.close()
print("{} fetched!".format(filename))
os.system("pause")
return filename
except FileNotFoundError:
print("{} does not exist...".format(filename))
os.system("pause")
def contact_us():
print("Thank you for sending me your feedback at {}.".format(__authoremail__))
def grab_text(x):
file = open("{}".format(x))
txt = file.read()
file.close()
return txt
def replace(char):
if char == " ":
return 0
elif char.isalpha():
return ord(char.lower()) - 96
elif char.isnumeric() and int(char) < 10:
return chr(int(char) + 65)
def new_file(x, y):
try:
file = open("{}".format(x), "r")
file.close()
os.remove("{}".format(x))
new_file(x, y)
except FileNotFoundError:
file = open("{}".format(x), "w")
file.write("THIS FILE HAS BEEN ENCRYPTED USING IMES\n")
file.write(y)
file.close()
def get_code():
os.system("cls")
code = input("Please enter encryption code...")
if code == "":
os.system("cls")
code = input("Code must be at least one Character long...")
return code
def check_int(x):
    # This Function Checks if a value can be read as a number.
    # TypeError is caught as well so that None (returned by replace() for
    # unsupported characters) does not crash the caller.
    try:
        int(x)
        y = True
    except (TypeError, ValueError):
        y = False
    return y
def encrypt():
filename = fetch()
code = get_code()
original_code = len(code)
code = original_code
code_changed = 0
replaced = 0
if filename is None:
return
txt = grab_text(filename)
etext = ""
for char in txt:
# For Each Character in text file replace character
x = replace(char)
y = check_int(x)
if y is True:
x += code
while x > 26:
x -= 26
etext += str(x) + " "
"""Replaces each character in the text
with its corresponding number from the alphabet +
the number of letters in the code"""
replaced += 1
if replaced == original_code:
code = code + original_code
code_changed += 1
replaced = 0
"""After the amount of replaced letters is the same
of the number of letters in the code the number of letters
in the code doubles"""
if code_changed == original_code:
"""If the code has changed the same number of times
than the number of letters in the original_code
then the code goes back to its original form..."""
code = original_code
code_changed = 0
replaced = 0
imes_file = "IMES {}".format(filename)
new_file(imes_file, etext)
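# Worked example of the rolling-code scheme implemented above (derived from
# encrypt(); the code word is a placeholder):
#   code word "ab" gives original_code = 2, so the first 2 letters are shifted
#   by 2, the next 2 by 4, and so on, until the code has grown 2 times and is
#   reset back to 2.
#   Encrypting the text "hi" therefore produces the tokens "10 11 "
#   ('h' -> 8 + 2, 'i' -> 9 + 2), written after the IMES header line.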
def find_char(x):
e_char = ""
txt = []
for char in x:
if char == " ":
txt.append(e_char)
e_char = ""
continue
e_char += char
return txt
def check_encrypted(x):
file = open("{}".format(x), "r")
x = file.readline()
if x == "THIS FILE HAS BEEN ENCRYPTED USING IMES\n":
y = file.read()
file.close()
return True, y
else:
print("File is Not encrypted!")
os.system("pause")
return False, False
def decrypt_char(char):
    # Maps one encrypted token back to plain text:
    #   integers 1..26 -> 'A'..'Z', integer 0 -> ' ',
    #   letters 'A'..'J' -> digits '0'..'9',
    #   anything else is passed through unchanged as a string.
    if isinstance(char, int):
        if char == 0:
            return " "
        if 1 <= char <= 26:
            return chr(ord("A") + char - 1)
        return str(char)
    if isinstance(char, str) and len(char) == 1 and "A" <= char <= "J":
        return str(ord(char) - ord("A"))
    return str(char)
def decrypt():
filename = fetch()
code = get_code()
original_code = len(code)
code = original_code
replaced = 0
code_changed = 0
decrypt_code = []
if filename is None:
return
is_encrypted, txt = check_encrypted(filename)
if is_encrypted is False:
return
txt = find_char(txt)
for instance in txt:
is_int = check_int(instance)
if is_int is False:
decrypt_code.append(instance)
continue
else:
char = int(instance)
char -= code
replaced += 1
if replaced == original_code:
code += original_code
code_changed += 1
replaced = 0
if code_changed == original_code:
code = original_code
code_changed = 0
replaced = 0
if char < 0:
char += 26
decrypt_code.append(char)
dtxt = ""
for char in decrypt_code:
dchar = decrypt_char(char)
dtxt += dchar
new_filename = input("Please enter the name for the new file...") + ".txt"
while new_filename == ".txt":
new_filename = input("Please enter a valid file name...") + ".txt"
    file = open("{}".format(new_filename), "w")
    file.write(dtxt)
    file.close()
def menu():
os.system("cls")
welcome()
print("1.Encrypt File")
print("2.Decrypt file")
print("3.Send Feedback to author")
menusel = input("Please enter the number of the option, or type exit to quit...")
is_int = check_int(menusel)
if is_int is True:
if int(menusel) == 1:
encrypt()
elif int(menusel) == 2:
decrypt()
elif int(menusel) == 3:
contact_us()
elif menusel == "EXIT" or menusel == "exit" or menusel == "Exit":
exit()
else:
print("Option not recognized! Please try again!")
os.system("pause")
# Main Code
while True:
menu()
| {
"content_hash": "42284388206d72dbb4274f097171f2c6",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 85,
"avg_line_length": 24.31023102310231,
"alnum_prop": 0.5054303556882976,
"repo_name": "ItsPapermunoz/IMES",
"id": "13cc116f7a880c8288ca17052b5e0f27cee1cdd9",
"size": "7378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Source/IMES.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8241"
},
{
"name": "TeX",
"bytes": "42181"
}
],
"symlink_target": ""
} |
import sys
import os
from gi.repository import GLib
realpath = GLib.get_current_dir()
sys.path.append(realpath + '/Modules/')
sys.path.append(realpath + '/Apps/')
from Apps.MenuBar import MenuBar
from Apps.AppsWindow import AppsWindow
os.system('python MenuBar_main.py &')
os.system('python AppsWindow_main.py &')
| {
"content_hash": "23b1f2d9b3a836f7ff1f6f8e4104adb2",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 40,
"avg_line_length": 21.266666666666666,
"alnum_prop": 0.7523510971786834,
"repo_name": "OlivierLarrieu/HYDV2",
"id": "176cb617655d1c820c4740bb0a34feec3cf4ba0e",
"size": "364",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "7519"
},
{
"name": "Python",
"bytes": "163525"
},
{
"name": "Shell",
"bytes": "72"
}
],
"symlink_target": ""
} |
"""
Formatting and style flags for ecstasy.
"""
from enum import Enum, unique
import ecstasy.errors as errors
LIMIT = 0
class Hack(object):
"""
A hack namespace to make enumeration work continuously
across multiple flag enum-classes. 'last' will be
set to the last enumerated enum-class and
start to the value at which the flag values
of the enum-class currently being evaluated
in the Flags.__new__() method start (i.e. start
is the flag value of the previous enum-class left-
shifted by one bit).
"""
last = None
start = 1
class Flags(Enum):
"""
Base class for all flag enum-classes as well as the
individual flag objects/enum members inside the classes
(by virtue of the enum.Enum semantics).
The most important re-defined method is __new__ which initializes
a flag with a command-line format/style escape-code specific to
the flag, as well as with a numeric value (power of 2) that
depends on its position inside the enum-class and also on the
position of the enum-class itself inside the order of enum
classes (as the Hack mechanism will continuously increment
flag values over multiple Flags sub-enum-classes). This class
also defines various necessary operator and conversion overloads
that define the semantics/interaction of flags (such as that
you can bitwise-OR and bitwise-AND them).
"""
def __new__(cls, code):
"""
Constructs a new flag value.
Apart from constructing a flag via object.__new__,
this method also sets the flags 'code' attribute
and its value, which is automatically determined
by the position of the flag in all enum-classes.
"""
global LIMIT
if cls is not Hack.last:
if Hack.last:
# Last flag left shifted by 1 bit
Hack.start = list(Hack.last)[-1].value << 1
Hack.last = cls
obj = object.__new__(cls)
obj._value_ = Hack.start << len(cls) # noqa
obj.code = str(code)
LIMIT = obj._value_ << 1 # noqa
return obj
def __int__(self):
"""
Converts the flag to its value.
Returns:
The integer value stored inside the flag's 'value' attribute.
"""
return self.value
def __str__(self):
"""
Turns the flag into its style-code.
Returns:
The flag's style/formatting code.
"""
return self.code
def __or__(self, other):
"""
Bitwise-OR operator overload.
Arguments:
other (Flag or int): A flag or a flag-combination (i.e. an integer).
Returns:
The combination of the bitwise-OR-ed flags (int).
"""
return self.value | int(other)
def __ror__(self, other):
"""
Reverse Bitwise-OR operator overload.
Arguments:
other (int): An integer value, usually a flag combination.
Returns:
The combination of the passed integer and the flag (int).
"""
return self.value | other
def __and__(self, other):
"""
Bitwise-OR operator overload.
Arguments:
other (Flags): A flag.
Returns:
The combination of the bitwise-OR-ed flags (int).
"""
return self.value & other.value
def __rand__(self, other):
"""
Reverse Bitwise-AND operator overload.
Arguments:
other (int): An integer value, usually a flag combination.
Returns:
The result of AND-ing the passed integer and the flag (int).
"""
return other & self.value
@unique
class Style(Flags):
"""
Special formatting flags pertaining to any style
alterations that do not involve color (but other
factors of appearence).
"""
Reset = (0)
Bold = (1)
Dim = (2)
Underline = (4)
Blink = (5)
Invert = (7)
Hidden = (8)
@unique
class Color(Flags):
"""
Text color flags (not fill-color).
"""
Default = (39)
Black = (30)
DarkRed = (31)
DarkGreen = (32)
DarkYellow = (33)
DarkBlue = (34)
DarkMagenta = (35)
DarkCyan = (36)
Gray = (37)
DarkGray = (90)
Red = (91)
Green = (92)
Yellow = (93)
Blue = (94)
Magenta = (95)
Cyan = (96)
White = (97)
@unique
class Fill(Flags):
"""
Fill color flags (not text-color).
"""
Default = (49)
Black = (40)
DarkRed = (41)
DarkGreen = (42)
DarkYellow = (43)
DarkBlue = (44)
DarkMagenta = (45)
DarkCyan = (46)
Gray = (47)
DarkGray = (100)
Red = (101)
Green = (102)
Yellow = (103)
Blue = (104)
Magenta = (105)
Cyan = (106)
White = (107)
def codify(combination):
"""
Gets escape-codes for flag combinations.
Arguments:
combination (int): Either a single integer-convertible flag
or an OR'd flag-combination.
Returns:
A semi-colon-delimited string of appropriate escape sequences.
Raises:
errors.FlagError if the combination is out-of-range.
"""
if (isinstance(combination, int) and
(combination < 0 or combination >= LIMIT)):
raise errors.FlagError("Out-of-range flag-combination!")
codes = []
for enum in (Style, Color, Fill):
for flag in enum:
if combination & flag:
codes.append(str(flag))
return ";".join(codes)
| {
"content_hash": "a1e965662c0b4d1c8c5d53a97f635e1a",
"timestamp": "",
"source": "github",
"line_count": 233,
"max_line_length": 71,
"avg_line_length": 20.549356223175966,
"alnum_prop": 0.6741854636591479,
"repo_name": "goldsborough/ecstasy",
"id": "b797db7c168fd6ea70cedac286a6873958f2fd07",
"size": "4788",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ecstasy/flags.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1496"
},
{
"name": "Python",
"bytes": "46132"
}
],
"symlink_target": ""
} |
"""Utility to package and upload the USB gadget framework.
"""
import argparse
import hashlib
import io
import os
import zipfile
try:
from urllib.request import Request, urlopen
except ImportError: # For Py2 compatibility
from urllib2 import Request, urlopen
def MakeZip(directory=None, files=None):
"""Construct a zip file.
Args:
directory: Include Python source files from this directory
files: Include these files
Returns:
A tuple of the buffer containing the zip file and its MD5 hash.
"""
buf = io.BytesIO()
archive = zipfile.PyZipFile(buf, 'w')
if directory is not None:
archive.writepy(directory)
if files is not None:
for f in files:
archive.write(f, os.path.basename(f))
archive.close()
content = buf.getvalue()
buf.close()
md5 = hashlib.md5(content).hexdigest()
return content, md5
def EncodeBody(filename, buf):
return b'\r\n'.join([
b'--foo',
b'Content-Disposition: form-data; name="file"; filename="%s"' %
filename,
b'Content-Type: application/octet-stream',
b'',
buf,
b'--foo--',
b''
])
def UploadZip(content, md5, host):
filename = b'usb_gadget-%s.zip' % md5.encode('utf-8')
req = Request(url='http://{}/update'.format(host),
data=EncodeBody(filename, content))
req.add_header('Content-Type', 'multipart/form-data; boundary=foo')
urlopen(req)
def main():
parser = argparse.ArgumentParser(
description='Package (and upload) the USB gadget framework.')
parser.add_argument(
'--dir', type=str, metavar='DIR',
help='package all Python files from DIR')
parser.add_argument(
'--zip-file', type=str, metavar='FILE',
help='save package as FILE')
parser.add_argument(
'--hash-file', type=str, metavar='FILE',
help='save package hash as FILE')
parser.add_argument(
'--upload', type=str, metavar='HOST[:PORT]',
help='upload package to target system')
parser.add_argument(
'files', metavar='FILE', type=str, nargs='*',
help='source files')
args = parser.parse_args()
content, md5 = MakeZip(directory=args.dir, files=args.files)
if args.zip_file:
with open(args.zip_file, 'wb') as zip_file:
zip_file.write(content)
if args.hash_file:
with open(args.hash_file, 'w') as hash_file:
hash_file.write(md5)
if args.upload:
UploadZip(content, md5, args.upload)
if __name__ == '__main__':
main()
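# Illustrative invocation (paths and the HOST[:PORT] value are placeholders):
#   python package.py --dir usb_gadget --zip-file out/usb_gadget.zip \
#       --hash-file out/usb_gadget.md5 --upload 192.168.0.10:8080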
| {
"content_hash": "f689a25cae010e34bb627b6dcb1b9c57",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 69,
"avg_line_length": 25.821052631578947,
"alnum_prop": 0.6498165511618427,
"repo_name": "chromium/chromium",
"id": "cfb6efeb16048705e4a7d699fc2a96386d88b65b",
"size": "2616",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "tools/usb_gadget/package.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
from os.path import join
from gtts import gTTS
from newspaper import Article
def make_an_audio(url, filename, lang=None):
if lang is None:
lang = 'en'
article = Article(url)
article.download()
article.parse()
tts = gTTS(text=article.text, lang=lang)
f = open(join('audio', filename), 'wb')
tts.write_to_fp(f)
f.close()
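# Illustrative usage (the URL and output file name are placeholders); the MP3
# is written into the local 'audio' directory that make_an_audio() expects:
#   make_an_audio('https://example.com/some-article', 'article.mp3', lang='en')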
| {
"content_hash": "4527edc1d6b12e6a63108d3aa7a0c060",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 44,
"avg_line_length": 21.352941176470587,
"alnum_prop": 0.6391184573002755,
"repo_name": "Fillll/pockebot",
"id": "4657e4f5fcf9bfa948958fbc98911fb96cc19472",
"size": "380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audio_actions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1443"
},
{
"name": "Python",
"bytes": "61682"
}
],
"symlink_target": ""
} |
"""Accesses the google.cloud.language.v1beta2 LanguageService API."""
import collections
import json
import os
import pkg_resources
import platform
from google.gax import api_callable
from google.gax import config
from google.gax import path_template
import google.gax
from google.cloud.gapic.language.v1beta2 import enums
from google.cloud.proto.language.v1beta2 import language_service_pb2
class LanguageServiceClient(object):
"""
Provides text analysis operations such as sentiment analysis and entity
recognition.
"""
SERVICE_ADDRESS = 'language.googleapis.com'
"""The default address of the service."""
DEFAULT_SERVICE_PORT = 443
"""The default port of the service."""
# The scopes needed to make gRPC calls to all of the methods defined in
# this service
_ALL_SCOPES = ('https://www.googleapis.com/auth/cloud-platform', )
def __init__(self,
service_path=SERVICE_ADDRESS,
port=DEFAULT_SERVICE_PORT,
channel=None,
credentials=None,
ssl_credentials=None,
scopes=None,
client_config=None,
app_name=None,
app_version='',
lib_name=None,
lib_version='',
metrics_headers=()):
"""Constructor.
Args:
service_path (string): The domain name of the API remote host.
port (int): The port on which to connect to the remote host.
channel (:class:`grpc.Channel`): A ``Channel`` instance through
which to make calls.
credentials (object): The authorization credentials to attach to
requests. These credentials identify this application to the
service.
ssl_credentials (:class:`grpc.ChannelCredentials`): A
``ChannelCredentials`` instance for use with an SSL-enabled
channel.
scopes (list[string]): A list of OAuth2 scopes to attach to requests.
client_config (dict):
A dictionary for call options for each method. See
:func:`google.gax.construct_settings` for the structure of
this data. Falls back to the default config if not specified
or the specified config is missing data points.
app_name (string): The name of the application calling
the service. Recommended for analytics purposes.
app_version (string): The version of the application calling
the service. Recommended for analytics purposes.
lib_name (string): The API library software used for calling
the service. (Unless you are writing an API client itself,
leave this as default.)
lib_version (string): The API library software version used
for calling the service. (Unless you are writing an API client
itself, leave this as default.)
metrics_headers (dict): A dictionary of values for tracking
client library metrics. Ultimately serializes to a string
(e.g. 'foo/1.2.3 bar/3.14.1'). This argument should be
considered private.
Returns:
A LanguageServiceClient object.
"""
# Unless the calling application specifically requested
# OAuth scopes, request everything.
if scopes is None:
scopes = self._ALL_SCOPES
# Initialize an empty client config, if none is set.
if client_config is None:
client_config = {}
# Initialize metrics_headers as an ordered dictionary
# (cuts down on cardinality of the resulting string slightly).
metrics_headers = collections.OrderedDict(metrics_headers)
metrics_headers['gl-python'] = platform.python_version()
# The library may or may not be set, depending on what is
# calling this client. Newer client libraries set the library name
# and version.
if lib_name:
metrics_headers[lib_name] = lib_version
# Finally, track the GAPIC package version.
metrics_headers['gapic'] = pkg_resources.get_distribution(
'google-cloud-language', ).version
# Load the configuration defaults.
default_client_config = json.loads(
pkg_resources.resource_string(
__name__, 'language_service_client_config.json').decode())
defaults = api_callable.construct_settings(
'google.cloud.language.v1beta2.LanguageService',
default_client_config,
client_config,
config.STATUS_CODE_NAMES,
metrics_headers=metrics_headers, )
self.language_service_stub = config.create_stub(
language_service_pb2.LanguageServiceStub,
channel=channel,
service_path=service_path,
service_port=port,
credentials=credentials,
scopes=scopes,
ssl_credentials=ssl_credentials)
self._analyze_sentiment = api_callable.create_api_call(
self.language_service_stub.AnalyzeSentiment,
settings=defaults['analyze_sentiment'])
self._analyze_entities = api_callable.create_api_call(
self.language_service_stub.AnalyzeEntities,
settings=defaults['analyze_entities'])
self._analyze_entity_sentiment = api_callable.create_api_call(
self.language_service_stub.AnalyzeEntitySentiment,
settings=defaults['analyze_entity_sentiment'])
self._analyze_syntax = api_callable.create_api_call(
self.language_service_stub.AnalyzeSyntax,
settings=defaults['analyze_syntax'])
self._annotate_text = api_callable.create_api_call(
self.language_service_stub.AnnotateText,
settings=defaults['annotate_text'])
# Service calls
def analyze_sentiment(self, document, encoding_type=None, options=None):
"""
Analyzes the sentiment of the provided text.
Example:
>>> from google.cloud.gapic.language.v1beta2 import language_service_client
>>> from google.cloud.proto.language.v1beta2 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> response = client.analyze_sentiment(document)
Args:
document (:class:`google.cloud.proto.language.v1beta2.language_service_pb2.Document`): Input document.
encoding_type (enum :class:`google.cloud.gapic.language.v1beta2.enums.EncodingType`): The encoding type used by the API to calculate sentence offsets for the
sentence sentiment.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1beta2.language_service_pb2.AnalyzeSentimentResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnalyzeSentimentRequest(
document=document, encoding_type=encoding_type)
return self._analyze_sentiment(request, options)
def analyze_entities(self, document, encoding_type=None, options=None):
"""
Finds named entities (currently proper names and common nouns) in the text
along with entity types, salience, mentions for each entity, and
other properties.
Example:
>>> from google.cloud.gapic.language.v1beta2 import language_service_client
>>> from google.cloud.proto.language.v1beta2 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> response = client.analyze_entities(document)
Args:
document (:class:`google.cloud.proto.language.v1beta2.language_service_pb2.Document`): Input document.
encoding_type (enum :class:`google.cloud.gapic.language.v1beta2.enums.EncodingType`): The encoding type used by the API to calculate offsets.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1beta2.language_service_pb2.AnalyzeEntitiesResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnalyzeEntitiesRequest(
document=document, encoding_type=encoding_type)
return self._analyze_entities(request, options)
def analyze_entity_sentiment(self,
document,
encoding_type=None,
options=None):
"""
Finds entities, similar to ``AnalyzeEntities`` in the text and analyzes
sentiment associated with each entity and its mentions.
Example:
>>> from google.cloud.gapic.language.v1beta2 import language_service_client
>>> from google.cloud.proto.language.v1beta2 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> response = client.analyze_entity_sentiment(document)
Args:
document (:class:`google.cloud.proto.language.v1beta2.language_service_pb2.Document`): Input document.
encoding_type (enum :class:`google.cloud.gapic.language.v1beta2.enums.EncodingType`): The encoding type used by the API to calculate offsets.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1beta2.language_service_pb2.AnalyzeEntitySentimentResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnalyzeEntitySentimentRequest(
document=document, encoding_type=encoding_type)
return self._analyze_entity_sentiment(request, options)
def analyze_syntax(self, document, encoding_type=None, options=None):
"""
Analyzes the syntax of the text and provides sentence boundaries and
tokenization along with part of speech tags, dependency trees, and other
properties.
Example:
>>> from google.cloud.gapic.language.v1beta2 import language_service_client
>>> from google.cloud.proto.language.v1beta2 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> response = client.analyze_syntax(document)
Args:
document (:class:`google.cloud.proto.language.v1beta2.language_service_pb2.Document`): Input document.
encoding_type (enum :class:`google.cloud.gapic.language.v1beta2.enums.EncodingType`): The encoding type used by the API to calculate offsets.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1beta2.language_service_pb2.AnalyzeSyntaxResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnalyzeSyntaxRequest(
document=document, encoding_type=encoding_type)
return self._analyze_syntax(request, options)
def annotate_text(self,
document,
features,
encoding_type=None,
options=None):
"""
A convenience method that provides all syntax, sentiment, and entity
features in one call.
Example:
>>> from google.cloud.gapic.language.v1beta2 import language_service_client
>>> from google.cloud.proto.language.v1beta2 import language_service_pb2
>>> client = language_service_client.LanguageServiceClient()
>>> document = language_service_pb2.Document()
>>> features = language_service_pb2.AnnotateTextRequest.Features()
>>> response = client.annotate_text(document, features)
Args:
document (:class:`google.cloud.proto.language.v1beta2.language_service_pb2.Document`): Input document.
features (:class:`google.cloud.proto.language.v1beta2.language_service_pb2.AnnotateTextRequest.Features`): The enabled features.
encoding_type (enum :class:`google.cloud.gapic.language.v1beta2.enums.EncodingType`): The encoding type used by the API to calculate offsets.
options (:class:`google.gax.CallOptions`): Overrides the default
settings for this call, e.g, timeout, retries etc.
Returns:
A :class:`google.cloud.proto.language.v1beta2.language_service_pb2.AnnotateTextResponse` instance.
Raises:
:exc:`google.gax.errors.GaxError` if the RPC is aborted.
:exc:`ValueError` if the parameters are invalid.
"""
request = language_service_pb2.AnnotateTextRequest(
document=document, features=features, encoding_type=encoding_type)
return self._annotate_text(request, options)
| {
"content_hash": "aa934338fc89dc39c893d7b6d12cf22a",
"timestamp": "",
"source": "github",
"line_count": 298,
"max_line_length": 167,
"avg_line_length": 46.31208053691275,
"alnum_prop": 0.6477066879211651,
"repo_name": "calpeyser/google-cloud-python",
"id": "0150ca4f4b8378c81bec5efd9ad6acb8a72ac90c",
"size": "14866",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "language/google/cloud/gapic/language/v1beta2/language_service_client.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "62906"
},
{
"name": "Python",
"bytes": "4584603"
},
{
"name": "Shell",
"bytes": "4147"
}
],
"symlink_target": ""
} |
import os
import sys
import string
import xml.etree.ElementTree as etree
from xml.etree.ElementTree import SubElement
from utils import _make_path_relative
from utils import xml_indent
fs_encoding = sys.getfilesystemencoding()
def _get_filetype(fn):
if fn.rfind('.cpp') != -1 or fn.rfind('.cxx') != -1:
return 8
if fn.rfind('.c') != -1 or fn.rfind('.C') != -1:
return 1
# assemble file type
if fn.rfind('.s') != -1 or fn.rfind('.S') != -1:
return 2
# header type
if fn.rfind('.h') != -1:
return 5
if fn.rfind('.lib') != -1:
return 4
if fn.rfind('.o') != -1:
return 3
# other filetype
return 5
def MDK4AddGroupForFN(ProjectFiles, parent, name, filename, project_path):
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
name = os.path.basename(filename)
path = os.path.dirname (filename)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
elif name.find('.S') != -1:
obj_name = name.replace('.s', '.o')
else:
obj_name = name
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
    try:
        file_name.text = name.decode(fs_encoding)
    except:
        file_name.text = name
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
    try:
        file_path.text = path.decode(fs_encoding)
    except:
        file_path.text = path
return group
def MDK4AddLibToGroup(ProjectFiles, group, name, filename, project_path):
name = os.path.basename(filename)
path = os.path.dirname (filename)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
elif name.find('.S') != -1:
obj_name = name.replace('.s', '.o')
else:
obj_name = name
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
try:
file_name.text = name.decode(fs_encoding)
except:
file_name.text = name
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
try:
file_path.text = path.decode(fs_encoding)
except:
file_path.text = path
return group
def MDK4AddGroup(ProjectFiles, parent, name, files, project_path, group_scons):
# don't add an empty group
if len(files) == 0:
return
group = SubElement(parent, 'Group')
group_name = SubElement(group, 'GroupName')
group_name.text = name
for f in files:
fn = f.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
files = SubElement(group, 'Files')
file = SubElement(files, 'File')
file_name = SubElement(file, 'FileName')
name = os.path.basename(path)
if name.find('.cpp') != -1:
obj_name = name.replace('.cpp', '.o')
elif name.find('.c') != -1:
obj_name = name.replace('.c', '.o')
elif name.find('.s') != -1:
obj_name = name.replace('.s', '.o')
elif name.find('.S') != -1:
obj_name = name.replace('.s', '.o')
if ProjectFiles.count(obj_name):
name = basename + '_' + name
ProjectFiles.append(obj_name)
file_name.text = name # name.decode(fs_encoding)
file_type = SubElement(file, 'FileType')
file_type.text = '%d' % _get_filetype(name)
file_path = SubElement(file, 'FilePath')
file_path.text = path # path.decode(fs_encoding)
# for local LOCAL_CFLAGS/LOCAL_CXXFLAGS/LOCAL_CCFLAGS/LOCAL_CPPPATH/LOCAL_CPPDEFINES
MiscControls_text = ' '
if file_type.text == '1' and 'LOCAL_CFLAGS' in group_scons:
MiscControls_text = MiscControls_text + group_scons['LOCAL_CFLAGS']
elif file_type.text == '8' and 'LOCAL_CXXFLAGS' in group_scons:
MiscControls_text = MiscControls_text + group_scons['LOCAL_CXXFLAGS']
if 'LOCAL_CCFLAGS' in group_scons:
MiscControls_text = MiscControls_text + group_scons['LOCAL_CCFLAGS']
if MiscControls_text != ' ':
FileOption = SubElement(file, 'FileOption')
FileArmAds = SubElement(FileOption, 'FileArmAds')
Cads = SubElement(FileArmAds, 'Cads')
VariousControls = SubElement(Cads, 'VariousControls')
MiscControls = SubElement(VariousControls, 'MiscControls')
MiscControls.text = MiscControls_text
Define = SubElement(VariousControls, 'Define')
if 'LOCAL_CPPDEFINES' in group_scons:
Define.text = ', '.join(set(group_scons['LOCAL_CPPDEFINES']))
else:
Define.text = ' '
Undefine = SubElement(VariousControls, 'Undefine')
Undefine.text = ' '
IncludePath = SubElement(VariousControls, 'IncludePath')
if 'LOCAL_CPPPATH' in group_scons:
IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in group_scons['LOCAL_CPPPATH']])
else:
IncludePath.text = ' '
return group
# The common part of making MDK4/5 project
def MDK45Project(tree, target, script):
project_path = os.path.dirname(os.path.abspath(target))
root = tree.getroot()
out = open(target, 'w')
out.write('<?xml version="1.0" encoding="UTF-8" standalone="no" ?>\n')
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CFLAGS = ''
ProjectFiles = []
# add group
groups = tree.find('Targets/Target/Groups')
if groups is None:
groups = SubElement(tree.find('Targets/Target'), 'Groups')
groups.clear() # clean old groups
for group in script:
group_tree = MDK4AddGroup(ProjectFiles, groups, group['name'], group['src'], project_path, group)
# get each include path
        if 'CPPPATH' in group and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
# get each group's definitions
if 'CPPDEFINES' in group and group['CPPDEFINES']:
if CPPDEFINES:
CPPDEFINES += group['CPPDEFINES']
else:
CPPDEFINES = group['CPPDEFINES']
# get each group's link flags
if 'LINKFLAGS' in group and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
if 'LIBS' in group and group['LIBS']:
for item in group['LIBS']:
lib_path = ''
for path_item in group['LIBPATH']:
full_path = os.path.join(path_item, item + '.lib')
if os.path.isfile(full_path): # has this library
lib_path = full_path
break
if lib_path != '':
if group_tree != None:
MDK4AddLibToGroup(ProjectFiles, group_tree, group['name'], lib_path, project_path)
else:
group_tree = MDK4AddGroupForFN(ProjectFiles, groups, group['name'], lib_path, project_path)
# write include path, definitions and link flags
IncludePath = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/IncludePath')
IncludePath.text = ';'.join([_make_path_relative(project_path, os.path.normpath(i)) for i in set(CPPPATH)])
Define = tree.find('Targets/Target/TargetOption/TargetArmAds/Cads/VariousControls/Define')
Define.text = ', '.join(set(CPPDEFINES))
Misc = tree.find('Targets/Target/TargetOption/TargetArmAds/LDads/Misc')
Misc.text = LINKFLAGS
xml_indent(root)
out.write(etree.tostring(root, encoding='utf-8').decode())
out.close()
def MDK4Project(target, script):
if os.path.isfile('template.uvproj') is False:
print ('Warning: The template project file [template.uvproj] not found!')
return
template_tree = etree.parse('template.uvproj')
MDK45Project(template_tree, target, script)
# remove project.uvopt file
project_uvopt = os.path.abspath(target).replace('uvproj', 'uvopt')
if os.path.isfile(project_uvopt):
os.unlink(project_uvopt)
# copy uvopt file
if os.path.exists('template.uvopt'):
import shutil
shutil.copy2('template.uvopt', 'project.uvopt')
def MDK5Project(target, script):
if os.path.isfile('template.uvprojx') is False:
print ('Warning: The template project file [template.uvprojx] not found!')
return
template_tree = etree.parse('template.uvprojx')
MDK45Project(template_tree, target, script)
# remove project.uvopt file
project_uvopt = os.path.abspath(target).replace('uvprojx', 'uvoptx')
if os.path.isfile(project_uvopt):
os.unlink(project_uvopt)
# copy uvopt file
if os.path.exists('template.uvoptx'):
import shutil
shutil.copy2('template.uvoptx', 'project.uvoptx')
def MDK2Project(target, script):
template = open('template.Uv2', "r")
lines = template.readlines()
project = open(target, "w")
project_path = os.path.dirname(os.path.abspath(target))
line_index = 5
# write group
for group in script:
lines.insert(line_index, 'Group (%s)\r\n' % group['name'])
line_index += 1
lines.insert(line_index, '\r\n')
line_index += 1
# write file
ProjectFiles = []
CPPPATH = []
CPPDEFINES = []
LINKFLAGS = ''
CFLAGS = ''
# number of groups
group_index = 1
for group in script:
# print group['name']
# get each include path
        if 'CPPPATH' in group and group['CPPPATH']:
            CPPPATH += group['CPPPATH']
# get each group's definitions
if 'CPPDEFINES' in group and group['CPPDEFINES']:
if CPPDEFINES:
CPPDEFINES += group['CPPDEFINES']
else:
CPPDEFINES = group['CPPDEFINES']
# get each group's link flags
if 'LINKFLAGS' in group and group['LINKFLAGS']:
if LINKFLAGS:
LINKFLAGS += ' ' + group['LINKFLAGS']
else:
LINKFLAGS += group['LINKFLAGS']
# generate file items
for node in group['src']:
fn = node.rfile()
name = fn.name
path = os.path.dirname(fn.abspath)
basename = os.path.basename(path)
path = _make_path_relative(project_path, path)
path = os.path.join(path, name)
if ProjectFiles.count(name):
name = basename + '_' + name
ProjectFiles.append(name)
lines.insert(line_index, 'File %d,%d,<%s><%s>\r\n'
% (group_index, _get_filetype(name), path, name))
line_index += 1
group_index = group_index + 1
lines.insert(line_index, '\r\n')
line_index += 1
# remove repeat path
paths = set()
for path in CPPPATH:
inc = _make_path_relative(project_path, os.path.normpath(path))
paths.add(inc) #.replace('\\', '/')
paths = [i for i in paths]
    CPPPATH = ';'.join(paths)
    definitions = [i for i in set(CPPDEFINES)]
    CPPDEFINES = ', '.join(definitions)
while line_index < len(lines):
if lines[line_index].startswith(' ADSCINCD '):
lines[line_index] = ' ADSCINCD (' + CPPPATH + ')\r\n'
if lines[line_index].startswith(' ADSLDMC ('):
lines[line_index] = ' ADSLDMC (' + LINKFLAGS + ')\r\n'
if lines[line_index].startswith(' ADSCDEFN ('):
lines[line_index] = ' ADSCDEFN (' + CPPDEFINES + ')\r\n'
line_index += 1
# write project
for line in lines:
project.write(line)
project.close()
def ARMCC_Version():
import rtconfig
import subprocess
import re
path = rtconfig.EXEC_PATH
if(rtconfig.PLATFORM == 'armcc'):
path = os.path.join(path, 'armcc.exe')
elif(rtconfig.PLATFORM == 'armclang'):
path = os.path.join(path, 'armlink.exe')
if os.path.exists(path):
cmd = path
else:
print('Error: get armcc version failed. Please update the KEIL MDK installation path in rtconfig.py!')
return "0.0"
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    stdout, stderr = child.communicate()
    # communicate() returns bytes on Python 3; decode so the regexes below match
    if not isinstance(stdout, str):
        stdout = stdout.decode(errors='ignore')
'''
example stdout:
Product: MDK Plus 5.24
Component: ARM Compiler 5.06 update 5 (build 528)
Tool: armcc [4d3621]
return version: MDK Plus 5.24/ARM Compiler 5.06 update 5 (build 528)/armcc [4d3621]
'''
version_Product = re.search(r'Product: (.+)', stdout).group(1)
version_Product = version_Product[:-1]
version_Component = re.search(r'Component: (.*)', stdout).group(1)
version_Component = version_Component[:-1]
version_Tool = re.search(r'Tool: (.*)', stdout).group(1)
version_Tool = version_Tool[:-1]
version_str_format = '%s/%s/%s'
version_str = version_str_format % (version_Product, version_Component, version_Tool)
return version_str
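# Illustrative 'script' argument expected by MDK4Project/MDK5Project (key names
# are taken from the lookups above; the values are placeholders and the 'src'
# entries are SCons file nodes supplied by the build system). The matching
# template.uvprojx must be present in the working directory.
#   script = [{
#       'name': 'Kernel',
#       'src': kernel_src_nodes,
#       'CPPPATH': ['include'],
#       'CPPDEFINES': ['RT_USING_LIBC'],
#       'LINKFLAGS': '',
#       'LIBS': [], 'LIBPATH': [],
#   }]
#   MDK5Project('project.uvprojx', script)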
| {
"content_hash": "b5138e33af993a247d3a4d62f28ed1c0",
"timestamp": "",
"source": "github",
"line_count": 435,
"max_line_length": 139,
"avg_line_length": 33.08735632183908,
"alnum_prop": 0.5827138192176753,
"repo_name": "RT-Thread/rt-thread",
"id": "0405eb0b1b540105741e4cb74ba96bede11017ce",
"size": "15340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/keil.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "20211623"
},
{
"name": "Batchfile",
"bytes": "77561"
},
{
"name": "C",
"bytes": "1056417995"
},
{
"name": "C++",
"bytes": "945403"
},
{
"name": "CMake",
"bytes": "250858"
},
{
"name": "CSS",
"bytes": "138218"
},
{
"name": "GDB",
"bytes": "11796"
},
{
"name": "HTML",
"bytes": "4763477"
},
{
"name": "JavaScript",
"bytes": "637"
},
{
"name": "LLVM",
"bytes": "10344"
},
{
"name": "Lex",
"bytes": "7026"
},
{
"name": "Logos",
"bytes": "7238"
},
{
"name": "Lua",
"bytes": "922"
},
{
"name": "M4",
"bytes": "17515"
},
{
"name": "Makefile",
"bytes": "485713"
},
{
"name": "Pawn",
"bytes": "1250"
},
{
"name": "Perl",
"bytes": "16728"
},
{
"name": "Python",
"bytes": "3175087"
},
{
"name": "RPC",
"bytes": "14162"
},
{
"name": "Shell",
"bytes": "422027"
},
{
"name": "Tcl",
"bytes": "179"
},
{
"name": "Yacc",
"bytes": "30555"
}
],
"symlink_target": ""
} |
import warnings
import pandas as pd
import pyspark
import pyspark as ps
from pyspark.sql.column import Column
import ibis.common.exceptions as com
import ibis.expr.schema as sch
import ibis.expr.types as types
from ibis.backends.base.sql import BaseSQLBackend
from ibis.backends.base.sql.ddl import (
CreateDatabase,
DropTable,
TruncateTable,
is_fully_qualified,
)
from ibis.expr.scope import Scope
from ibis.expr.timecontext import canonicalize_context, localize_context
from . import ddl
from .client import PySparkTable, spark_dataframe_schema
from .compiler import PySparkDatabaseTable, PySparkExprTranslator
from .datatypes import spark_dtype
_read_csv_defaults = {
'header': True,
'multiLine': True,
'mode': 'FAILFAST',
'escape': '"',
}
class _PySparkCursor:
"""Spark cursor.
This allows the Spark client to reuse machinery in
:file:`ibis/backends/base/sql/client.py`.
"""
def __init__(self, query):
"""
Construct a SparkCursor with query `query`.
Parameters
----------
query : pyspark.sql.DataFrame
Contains result of query.
"""
self.query = query
def fetchall(self):
"""Fetch all rows."""
result = self.query.collect() # blocks until finished
return result
@property
def columns(self):
"""Return the columns of the result set."""
return self.query.columns
@property
def description(self):
"""Get the fields of the result set's schema."""
return self.query.schema
def __enter__(self):
# For compatibility when constructed from Query.execute()
"""No-op for compatibility."""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""No-op for compatibility."""
class Backend(BaseSQLBackend):
name = 'pyspark'
table_class = PySparkDatabaseTable
table_expr_class = PySparkTable
def connect(self, session):
"""
        Create a PySpark `Backend` for use with Ibis.
        The backend wraps an existing `pyspark.sql.SparkSession`; queries run
        through that session and its underlying SparkContext.
        See documentation for SparkContext:
        https://spark.apache.org/docs/latest/api/python/_modules/pyspark/context.html#SparkContext
"""
new_backend = self.__class__()
new_backend._context = session.sparkContext
new_backend._session = session
new_backend._catalog = session.catalog
# Spark internally stores timestamps as UTC values, and timestamp data
# that is brought in without a specified time zone is converted as
# local time to UTC with microsecond resolution.
# https://spark.apache.org/docs/latest/sql-pyspark-pandas-with-arrow.html#timestamp-with-time-zone-semantics
new_backend._session.conf.set('spark.sql.session.timeZone', 'UTC')
return new_backend
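    # Hypothetical usage sketch (not part of the original file): the backend is
    # normally handed an existing SparkSession; the local session construction
    # below is illustrative only.
    #
    #     from pyspark.sql import SparkSession
    #     session = SparkSession.builder.master('local[1]').getOrCreate()
    #     con = Backend().connect(session)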
@property
def version(self):
return pyspark.__version__
def set_database(self, name):
warnings.warn(
'`set_database` is deprecated and will be removed in a future '
'version of Ibis. Create a new connection to the desired database '
'instead',
FutureWarning,
)
self._catalog.setCurrentDatabase(name)
@property
def current_database(self):
return self._catalog.currentDatabase()
def list_databases(self, like=None):
databases = [db.name for db in self._catalog.listDatabases()]
return self._filter_with_like(databases, like)
def list_tables(self, like=None, database=None):
tables = [
t.name
for t in self._catalog.listTables(
dbName=database or self.current_database
)
]
return self._filter_with_like(tables, like)
def compile(self, expr, timecontext=None, params=None, *args, **kwargs):
"""Compile an ibis expression to a PySpark DataFrame object"""
if timecontext is not None:
session_timezone = self._session.conf.get(
'spark.sql.session.timeZone'
)
# Since spark use session timezone for tz-naive timestamps
# we localize tz-naive context here to match that behavior
timecontext = localize_context(
canonicalize_context(timecontext), session_timezone
)
# Insert params in scope
if params is None:
scope = Scope()
else:
scope = Scope(
{param.op(): raw_value for param, raw_value in params.items()},
timecontext,
)
return PySparkExprTranslator().translate(
expr, scope=scope, timecontext=timecontext
)
def execute(
self, expr, timecontext=None, params=None, limit='default', **kwargs
):
if isinstance(expr, types.TableExpr):
return self.compile(expr, timecontext, params, **kwargs).toPandas()
elif isinstance(expr, types.ColumnExpr):
# expression must be named for the projection
expr = expr.name('tmp')
return self.compile(
expr.to_projection(), timecontext, params, **kwargs
).toPandas()['tmp']
elif isinstance(expr, types.ScalarExpr):
compiled = self.compile(expr, timecontext, params, **kwargs)
if isinstance(compiled, Column):
# attach result column to a fake DataFrame and
# select the result
compiled = self._session.range(0, 1).select(compiled)
return compiled.toPandas().iloc[0, 0]
else:
raise com.IbisError(
f"Cannot execute expression of type: {type(expr)}"
)
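    # Hypothetical usage sketch (not part of the original file): with a
    # connected backend `con`, table expressions compile to pyspark DataFrames
    # and execute to pandas objects; the table name 't' is illustrative.
    #
    #     t = con.table('t')
    #     spark_df = con.compile(t)   # pyspark.sql.DataFrame
    #     pandas_df = con.execute(t)  # pandas.DataFrame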
@staticmethod
def _fully_qualified_name(name, database):
if is_fully_qualified(name):
return name
if database:
return f'{database}.`{name}`'
return name
def close(self):
"""
Close Spark connection and drop any temporary objects
"""
self._context.stop()
def fetch_from_cursor(self, cursor, schema):
df = cursor.query.toPandas() # blocks until finished
return schema.apply_to(df)
def raw_sql(self, stmt):
query = self._session.sql(stmt)
return _PySparkCursor(query)
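    # Hypothetical sketch (not part of the original file): raw_sql returns a
    # _PySparkCursor wrapping a lazy DataFrame; collection only happens when
    # fetchall() is called.
    #
    #     cur = con.raw_sql('SELECT 1 AS x')
    #     rows = cur.fetchall()  # list of pyspark Row objects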
def _get_schema_using_query(self, query):
cur = self.raw_sql(query)
return spark_dataframe_schema(cur.query)
def _get_jtable(self, name, database=None):
try:
jtable = self._catalog._jcatalog.getTable(
self._fully_qualified_name(name, database)
)
except ps.sql.utils.AnalysisException as e:
raise com.IbisInputError(str(e)) from e
return jtable
def table(self, name, database=None):
"""
Create a table expression that references a particular table or view
in the database.
Parameters
----------
name : string
database : string, optional
Returns
-------
table : TableExpr
"""
jtable = self._get_jtable(name, database)
name, database = jtable.name(), jtable.database()
qualified_name = self._fully_qualified_name(name, database)
schema = self.get_schema(qualified_name)
node = self.table_class(qualified_name, schema, self)
return self.table_expr_class(node)
def create_database(self, name, path=None, force=False):
"""
Create a new Spark database
Parameters
----------
name : string
Database name
        path : string, default None
            Path where the database data will be stored; if None, Spark's
            default location is used
        force : boolean, default False
            If True, do not fail when a database with this name already exists
"""
statement = CreateDatabase(name, path=path, can_exist=force)
return self.raw_sql(statement.compile())
def drop_database(self, name, force=False):
"""Drop a Spark database.
Parameters
----------
name : string
Database name
force : bool, default False
If False, Spark throws exception if database is not empty or
database does not exist
"""
statement = ddl.DropDatabase(name, must_exist=not force, cascade=force)
return self.raw_sql(statement.compile())
def get_schema(self, table_name, database=None):
"""
Return a Schema object for the indicated table and database
Parameters
----------
table_name : string
May be fully qualified
database : string
Spark does not have a database argument for its table() method,
so this must be None
Returns
-------
schema : ibis Schema
"""
if database is not None:
raise com.UnsupportedArgumentError(
'Spark does not support database param for table'
)
df = self._session.table(table_name)
return sch.infer(df)
def _schema_from_csv(self, path, **kwargs):
"""
Return a Schema object for the indicated csv file. Spark goes through
the file once to determine the schema. See documentation for
`pyspark.sql.DataFrameReader` for kwargs.
Parameters
----------
path : string
Returns
-------
schema : ibis Schema
"""
options = _read_csv_defaults.copy()
options.update(kwargs)
options['inferSchema'] = True
df = self._session.read.csv(path, **options)
return spark_dataframe_schema(df)
def _create_table_or_temp_view_from_csv(
self,
name,
path,
schema=None,
database=None,
force=False,
temp_view=False,
format='parquet',
**kwargs,
):
options = _read_csv_defaults.copy()
options.update(kwargs)
if schema:
assert ('inferSchema', True) not in options.items()
schema = spark_dtype(schema)
options['schema'] = schema
else:
options['inferSchema'] = True
df = self._session.read.csv(path, **options)
if temp_view:
if force:
df.createOrReplaceTempView(name)
else:
df.createTempView(name)
else:
qualified_name = self._fully_qualified_name(
name, database or self.current_database
)
mode = 'error'
if force:
mode = 'overwrite'
df.write.saveAsTable(qualified_name, format=format, mode=mode)
def create_table(
self,
table_name,
obj=None,
schema=None,
database=None,
force=False,
# HDFS options
format='parquet',
):
"""
Create a new table in Spark using an Ibis table expression.
Parameters
----------
table_name : string
obj : TableExpr or pandas.DataFrame, optional
If passed, creates table from select statement results
schema : ibis.Schema, optional
Mutually exclusive with obj, creates an empty table with a
particular schema
database : string, default None (optional)
        force : boolean, default False
            If True, do not fail when a table with the indicated name already
            exists; DataFrame input is written with mode='overwrite'
format : {'parquet'}
Examples
--------
>>> con.create_table('new_table_name', table_expr) # doctest: +SKIP
"""
if obj is not None:
if isinstance(obj, pd.DataFrame):
spark_df = self._session.createDataFrame(obj)
mode = 'error'
if force:
mode = 'overwrite'
spark_df.write.saveAsTable(
table_name, format=format, mode=mode
)
return
ast = self.compiler.to_ast(obj)
select = ast.queries[0]
statement = ddl.CTAS(
table_name,
select,
database=database,
can_exist=force,
format=format,
)
elif schema is not None:
statement = ddl.CreateTableWithSchema(
table_name,
schema,
database=database,
format=format,
can_exist=force,
)
else:
raise com.IbisError('Must pass expr or schema')
return self.raw_sql(statement.compile())
def create_view(
self, name, expr, database=None, can_exist=False, temporary=False
):
"""
Create a Spark view from a table expression
Parameters
----------
name : string
expr : ibis TableExpr
database : string, default None
can_exist : boolean, default False
Replace an existing view of the same name if it exists
temporary : boolean, default False
"""
ast = self.compiler.to_ast(expr)
select = ast.queries[0]
statement = ddl.CreateView(
name,
select,
database=database,
can_exist=can_exist,
temporary=temporary,
)
return self.raw_sql(statement.compile())
def drop_table(self, name, database=None, force=False):
self.drop_table_or_view(name, database, force)
def drop_view(self, name, database=None, force=False):
self.drop_table_or_view(name, database, force)
def drop_table_or_view(self, name, database=None, force=False):
"""
Drop a Spark table or view
Parameters
----------
name : string
database : string, default None (optional)
        force : boolean, default False
            If False, the database may raise an exception when the table does
            not exist
Examples
--------
>>> table = 'my_table'
>>> db = 'operations'
>>> con.drop_table_or_view(table, db, force=True) # doctest: +SKIP
"""
statement = DropTable(name, database=database, must_exist=not force)
self.raw_sql(statement.compile())
def truncate_table(self, table_name, database=None):
"""
Delete all rows from, but do not drop, an existing table
Parameters
----------
table_name : string
database : string, default None (optional)
"""
statement = TruncateTable(table_name, database=database)
self.raw_sql(statement.compile())
def insert(
self,
table_name,
obj=None,
database=None,
overwrite=False,
values=None,
validate=True,
):
"""
Insert into existing table.
See SparkTable.insert for other parameters.
Parameters
----------
table_name : string
database : string, default None
Examples
--------
>>> table = 'my_table'
>>> con.insert(table, table_expr) # doctest: +SKIP
# Completely overwrite contents
>>> con.insert(table, table_expr, overwrite=True) # doctest: +SKIP
"""
table = self.table(table_name, database=database)
return table.insert(
obj=obj, overwrite=overwrite, values=values, validate=validate
)
def compute_stats(self, name, database=None, noscan=False):
"""
Issue COMPUTE STATISTICS command for a given table
Parameters
----------
name : string
Can be fully qualified (with database name)
database : string, optional
noscan : boolean, default False
If True, collect only basic statistics for the table (number of
rows, size in bytes).
"""
maybe_noscan = ' NOSCAN' if noscan else ''
stmt = 'ANALYZE TABLE {} COMPUTE STATISTICS{}'.format(
self._fully_qualified_name(name, database), maybe_noscan
)
return self.raw_sql(stmt)
| {
"content_hash": "f48f808fc022d8f5e7581282fdfd5c50",
"timestamp": "",
"source": "github",
"line_count": 530,
"max_line_length": 116,
"avg_line_length": 30.39245283018868,
"alnum_prop": 0.5724484728085424,
"repo_name": "cloudera/ibis",
"id": "86b1f125661bf95b48ce63a9b48e679ba2b5d658",
"size": "16108",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ibis/backends/pyspark/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "44943"
},
{
"name": "CMake",
"bytes": "4383"
},
{
"name": "Python",
"bytes": "2570944"
},
{
"name": "Shell",
"bytes": "1989"
}
],
"symlink_target": ""
} |
def game_log(text):
global log
# Create the log if it does not exist
try:
log
except NameError:
log = []
log.append(text)
def get_game_log_html():
    # Join the log entries as HTML; the log only exists once game_log() has run
    global log
    try:
        return '<br>'.join(log)
    except NameError:
        return 'The game has not yet begun!'
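# Illustrative usage sketch (not part of the original module):
#
#     game_log('Player 1 rolled a 6')
#     game_log('Player 2 drew a card')
#     get_game_log_html()  # -> 'Player 1 rolled a 6<br>Player 2 drew a card'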
| {
"content_hash": "b0df6782a3b3a3555c019db8508f16b7",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 44,
"avg_line_length": 20.466666666666665,
"alnum_prop": 0.5667752442996743,
"repo_name": "robertecurtin/plutos-envy",
"id": "a05d9070c3e14c22d74f080855cf8807f6d6779f",
"size": "393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "game/gamelog.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "2744"
},
{
"name": "Python",
"bytes": "15684"
},
{
"name": "Shell",
"bytes": "518"
}
],
"symlink_target": ""
} |
import json
from datetime import datetime, timedelta
from unittest import mock
import redis
import responses
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils.timezone import get_current_timezone, now
from mockredis import mock_redis_client
from frigg.authentication.models import User
from frigg.builds.models import Build, BuildResult, Project
r = redis.Redis(**settings.REDIS_SETTINGS)
class ProjectTestCase(TestCase):
fixtures = ['frigg/builds/fixtures/users.json']
def test___str__(self):
project = Project.objects.create(owner='frigg', name='frigg-worker')
self.assertEqual(str(project), 'frigg / frigg-worker')
def test_clone_url_public(self):
project = Project(owner='frigg', name='frigg-worker', private=False)
self.assertEqual(project.clone_url, 'https://github.com/frigg/frigg-worker.git')
@mock.patch('frigg.builds.models.Project.github_token', '')
def test_clone_url_private(self):
project = Project(owner='frigg', name='chewie', private=True)
self.assertEqual(project.clone_url, 'https://@github.com/frigg/chewie.git')
@mock.patch('frigg.builds.models.Project.github_token', '')
def test_clone_url_ssh(self):
project = Project(owner='frigg', name='chewie', should_clone_with_ssh=True)
self.assertEqual(project.clone_url, '[email protected]:frigg/chewie.git')
def test_last_build_number(self):
project = Project.objects.create(owner='frigg', name='frigg-worker', private=False)
self.assertEqual(project.last_build_number, 0)
Build.objects.create(project=project, build_number=42)
self.assertEqual(project.last_build_number, 42)
def test_auto_approval(self):
project = Project.objects.create(owner='frigg', name='frigg')
self.assertTrue(project.approved)
@mock.patch('frigg.helpers.github.list_collaborators', lambda x: ['dumbledore'])
def test_update_members(self):
project = Project.objects.create(owner='frigg', name='frigg-worker', private=False)
project.update_members()
self.assertEqual(project.members.all().count(), 1)
def test_start(self):
project = Project.objects.create(owner='frigg', name='frigg')
build = project.start_build({
'branch': 'b',
'sha': 's',
'author': 'dumbledore',
'pull_request_id': 0,
'message': '',
})
self.assertEqual(build.branch, 'b')
self.assertEqual(build.sha, 's')
self.assertEqual(build.author, 'dumbledore')
self.assertEqual(build.pull_request_id, 0)
self.assertEqual(build.build_number, 1)
self.assertEqual(project.last_build_number, 1)
@mock.patch('frigg.builds.models.Build.start')
def test_start_pull_request_with_earlier_build(self, mock_start):
data = {
'branch': 'b',
'sha': 's',
'author': 'dumbledore',
'pull_request_id': 0,
'message': '',
}
project = Project.objects.create(owner='frigg', name='frigg')
project.start_build(data)
self.assertEqual(project.builds.count(), 1)
self.assertEqual(project.last_build_number, 1)
data['pull_request_id'] = 1
build = project.start_build(data)
self.assertEqual(build.branch, 'b')
self.assertEqual(build.sha, 's')
self.assertEqual(build.author, 'dumbledore')
self.assertEqual(build.pull_request_id, 1)
self.assertEqual(build.build_number, 1)
self.assertEqual(project.last_build_number, 1)
def test_average_time(self):
project = Project.objects.create(owner='frigg', name='frigg-worker', private=False)
build_options = dict(project=project, build_number=1,
start_time=datetime(2015, 5, 5, 5, 5, tzinfo=get_current_timezone()),
end_time=datetime(2015, 5, 5, 5, 15, tzinfo=get_current_timezone()))
builds = [Build.objects.create(**build_options)]
build_options = dict(project=project, build_number=2,
start_time=datetime(2015, 5, 5, 5, 5, tzinfo=get_current_timezone()),
end_time=datetime(2015, 5, 5, 5, 25, tzinfo=get_current_timezone()))
builds += [Build.objects.create(**build_options)]
self.assertEqual(project.average_time, timedelta(minutes=15))
def test_number_of_members(self):
project = Project.objects.create(owner='frigg', name='frigg-worker', private=False)
self.assertEqual(project.number_of_members, 0)
project.members.add(User.objects.get(pk=1))
self.assertEqual(project.number_of_members, 1)
@mock.patch('frigg.builds.models.get_badge')
def test_get_badge_should_call_badge_with_last_build(self, mock_get_badge):
project = Project.objects.create(owner='frigg', name='frigg-worker', private=False)
build = Build.objects.create(project=project)
BuildResult.objects.create(build=build, succeeded=True)
self.assertIsNotNone(project.get_badge())
mock_get_badge.assert_called_once_with(True)
@mock.patch('frigg.builds.models.get_unknown_badge')
def test_get_badge_should_call_unknown_badge_if_no_build(self, mock_get_unknown_badge):
project = Project.objects.create(owner='frigg', name='frigg-worker', private=False)
self.assertIsNotNone(project.get_badge())
mock_get_unknown_badge.assert_called_once_with('build')
@mock.patch('frigg.builds.models.get_coverage_badge')
def test_get_coverage_badge_should_call_coverage_badge_with_last_build(self, mock_get_badge):
project = Project.objects.create(owner='frigg', name='frigg-worker', private=False)
build = Build.objects.create(project=project)
BuildResult.objects.create(build=build, succeeded=True, coverage=98)
self.assertIsNotNone(project.get_coverage_badge())
mock_get_badge.assert_called_once_with(98)
@mock.patch('frigg.builds.models.get_unknown_badge')
    def test_get_coverage_badge_should_call_unknown_badge_if_no_build(self, mock_get_unknown):
project = Project.objects.create(owner='frigg', name='frigg-worker', private=False)
self.assertIsNotNone(project.get_coverage_badge())
mock_get_unknown.assert_called_once_with('coverage')
class BuildTestCase(TestCase):
fixtures = ['frigg/builds/fixtures/users.json']
def setUp(self):
r.flushall()
self.project = Project.objects.create(owner='frigg', name='frigg-worker', approved=True)
def test___str__(self):
build = Build.objects.create(project=self.project, branch='master', build_number=1)
self.assertEqual(str(build), 'frigg / frigg-worker / master #1')
@mock.patch('frigg.builds.models.Project.github_token', 'token')
def test_queue_object(self):
build = Build.objects.create(project=self.project, branch='master', sha='s', build_number=1)
obj = build.queue_object
self.assertEqual(obj['id'], build.pk)
self.assertEqual(obj['branch'], build.branch)
self.assertEqual(obj['sha'], build.sha)
self.assertEqual(obj['image'], settings.DEFAULT_BUILD_IMAGE)
self.assertEqual(obj['clone_url'], build.project.clone_url)
self.assertEqual(obj['owner'], build.project.owner)
self.assertEqual(obj['name'], build.project.name)
self.assertEqual(obj['gh_token'], 'token')
self.assertFalse('pull_request_id' in obj)
build.pull_request_id = 42
obj = build.queue_object
self.assertEqual(obj['pull_request_id'], 42)
def test_queue_object_have_environment_variables(self):
self.project.environment_variables.create(key='V', value=42, is_secret=False)
build = Build.objects.create(project=self.project, branch='master', sha='s', build_number=1)
obj = build.queue_object
assert obj['environment_variables']['V'] == '42'
assert 'V' not in obj['secrets']
def test_queue_object_have_secrets_when_no_pull_request(self):
self.project.environment_variables.create(key='V', value=40, is_secret=True)
build = Build.objects.create(project=self.project, branch='master', sha='s', build_number=1)
obj = build.queue_object
assert obj['secrets']['V'] == '40'
assert 'V' not in obj['environment_variables']
def test_queue_object_not_have_secrets_when_no_pull_request_and_custom_branch(self):
self.project.environment_variables.create(key='V', value=40, is_secret=True)
build = Build.objects.create(project=self.project, branch='custom', sha='s', build_number=1)
obj = build.queue_object
assert 'V' not in obj['secrets']
assert 'V' not in obj['environment_variables']
def test_queue_object_not_have_secrets_when_pull_request(self):
self.project.environment_variables.create(key='V', value=40, is_secret=True)
build = Build.objects.create(
project=self.project,
branch='master',
sha='s',
build_number=1,
pull_request_id=2
)
obj = build.queue_object
assert 'V' not in obj['secrets']
def test_queue_set_custom_image(self):
custom_docker_image = 'frigg/frigg-test-dind'
project = Project.objects.create(image=custom_docker_image)
build = Build.objects.create(project=project)
obj = build.queue_object
self.assertEqual(obj['id'], build.pk)
self.assertEqual(obj['image'], custom_docker_image)
def test_color(self):
build = Build.objects.create(project=self.project, branch='master', build_number=1)
self.assertEqual(build.color, 'orange')
result = BuildResult.objects.create(build=build, succeeded=True, result_log=[])
self.assertEqual(build.color, 'green')
result.still_running = True
self.assertEqual(build.color, 'orange')
result.still_running = False
result.succeeded = False
self.assertEqual(build.color, 'red')
result.result_log = [{'task': ''}]
self.assertEqual(build.color, 'gray')
@responses.activate
def test_send_webhook(self):
responses.add(
responses.POST,
'http://w.frigg.io',
body='Ok',
content_type='application/json'
)
build = Build.objects.create(project=self.project, branch='master', build_number=1)
BuildResult.objects.create(build=build, succeeded=True)
response = build.send_webhook('http://w.frigg.io')
request = json.loads(response.request.body)
self.assertEqual(request['sha'], build.sha)
self.assertEqual(request['build_url'], build.get_absolute_url())
self.assertEqual(request['state'], build.result.succeeded)
@mock.patch('frigg.helpers.github.set_commit_status')
@mock.patch('redis.Redis', mock_redis_client)
def test_start(self, mock_set_commit_status):
build = Build.objects.create(project=self.project, branch='master', build_number=1)
BuildResult.objects.create(build=build, succeeded=True)
build.start()
self.assertEqual(BuildResult.objects.all().count(), 0)
self.assertTrue(mock_set_commit_status.called)
@mock.patch('frigg.helpers.github.set_commit_status')
@mock.patch('redis.Redis', mock_redis_client)
def test_start_restart_should_not_have_end_time(self, mock_set_commit_status):
build = Build.objects.create(project=self.project, branch='master', build_number=1,
end_time=now())
build.start()
build = Build.objects.get(project=self.project, build_number=1)
self.assertIsNone(build.end_time)
self.assertTrue(mock_set_commit_status.called)
@mock.patch('frigg.builds.models.BuildResult.create_not_approved')
@mock.patch('redis.Redis', mock_redis_client)
def test_start_not_approved(self, mock_create_not_approved):
project = Project.objects.create(owner='tind', name='frigg', approved=False)
build = Build.objects.create(project=project, branch='master', build_number=1)
build.start()
self.assertTrue(mock_create_not_approved.called)
@mock.patch('frigg.builds.models.Build.start')
def test_restart_should_start_if_not_in_queue(self, mock_start):
project = Project.objects.create(owner='tind', name='frigg', approved=False)
build = Build.objects.create(project=project, branch='master', build_number=1)
build.start()
r.rpop(project.queue_name)
assert r.llen(project.queue_name) == 0
build.restart()
assert mock_start.called
@mock.patch('frigg.builds.models.Build.start')
def test_restart_should_not_start_if_already_in_queue(self, mock_start):
project = Project.objects.create(owner='tind', name='frigg', approved=False)
build = Build.objects.create(project=project, branch='master', build_number=1)
r.lpush(project.queue_name, json.dumps(build.queue_object))
build.restart()
assert not mock_start.called
def test_has_timed_out(self):
project = Project.objects.create(owner='frigg', name='frigg')
build = Build.objects.create(project=project, build_number=1,
start_time=now() - timedelta(minutes=61))
self.assertTrue(build.has_timed_out())
build.start_time = now()
self.assertFalse(build.has_timed_out())
with mock.patch('frigg.builds.models.Project.average_time', timedelta(seconds=120)):
self.assertFalse(build.has_timed_out())
build.start_time = now() - timedelta(seconds=60)
self.assertFalse(build.has_timed_out())
build.start_time = now() - timedelta(seconds=400)
self.assertTrue(build.has_timed_out())
def test_author_user(self):
user = get_user_model().objects.get(pk=1)
build = Build(
project=self.project,
branch='master',
build_number=1,
author=user.username
)
self.assertEqual(build.author_user, user)
build.author = 'i'
self.assertIsNone(build.author_user)
def test_short_message(self):
build = Build(
project=self.project,
branch='master',
build_number=1,
message='Multi\nLine\nMessage'
)
self.assertEqual(build.short_message, 'Multi')
build = Build(
project=self.project,
branch='master',
build_number=1,
message='Single line message'
)
self.assertEqual(build.short_message, 'Single line message')
def test_rendered_message(self):
build = Build(
project=self.project,
branch='master',
build_number=1,
message='Single **line** message'
)
self.assertEqual(build.rendered_message, '<p>Single <strong>line</strong> message</p>')
@mock.patch('frigg.builds.models.Build.send_webhook')
@mock.patch('frigg.helpers.github.set_commit_status')
def test_handle_worker_report(self, mock_set_commit_status, mock_send_webhook):
build = Build.objects.create(
project=self.project,
branch='master',
build_number=1,
)
build.handle_worker_report({
'sha': 'superbhash',
'clone_url': 'https://github.com/frigg/frigg-worker.git',
'name': 'frigg-worker',
'branch': 'master',
'owner': 'frigg',
'id': 1,
'results': [
{'task': 'make test', 'return_code': 0, 'succeeded': True, 'log': 'log'},
{'task': 'make test'}
],
'webhooks': ['http://example.com']
})
self.assertIsNotNone(Build.objects.get(pk=build.id).end_time)
mock_set_commit_status.assert_called_once_with(build)
mock_send_webhook.assert_called_once_with('http://example.com')
@mock.patch('frigg.builds.models.Build.send_webhook')
@mock.patch('frigg.helpers.github.set_commit_status')
def test_handle_worker_host(self, mock_set_commit_status, mock_send_webhook):
build = Build.objects.create(
project=self.project,
branch='master',
build_number=1,
)
build.handle_worker_report({
'sha': 'superbhash',
'clone_url': 'https://github.com/frigg/frigg-worker.git',
'name': 'frigg-worker',
'branch': 'master',
'owner': 'frigg',
'id': 1,
'results': [
{'task': 'make test', 'return_code': 0, 'succeeded': True, 'log': 'log'},
{'task': 'make test'}
],
'webhooks': ['http://example.com']
})
self.assertIsNotNone(Build.objects.get(pk=build.id).end_time)
mock_set_commit_status.assert_called_once_with(build)
mock_send_webhook.assert_called_once_with('http://example.com')
@mock.patch('frigg.builds.models.Build.send_webhook')
@mock.patch('frigg.helpers.github.set_commit_status')
def test_handle_worker_report_still_running(self, mock_set_commit_status, mock_send_webhook):
build = Build.objects.create(
project=self.project,
branch='master',
build_number=1,
)
build.handle_worker_report({
'sha': 'superbhash',
'clone_url': 'https://github.com/frigg/frigg-worker.git',
'name': 'frigg-worker',
'branch': 'master',
'owner': 'frigg',
'worker_host': 'albus.frigg.io',
'finished': False,
'id': 1,
'results': [
{'task': 'make test', 'return_code': 0, 'succeeded': True, 'log': 'log'},
{'task': 'flake8', 'pending': True},
{'task': 'make test'}
],
'webhooks': ['http://example.com']
})
self.assertIsNone(Build.objects.get(pk=build.id).end_time)
self.assertEqual(build.result.worker_host, 'albus.frigg.io')
@mock.patch('frigg.builds.models.Project.average_time', timedelta(minutes=10))
def test_estimated_finish_time(self):
build = Build(
project=self.project,
)
self.assertEqual(build.estimated_finish_time, None)
build.start_time = now()
self.assertEqual(build.estimated_finish_time.day, (now() + timedelta(minutes=10)).day)
self.assertEqual(build.estimated_finish_time.hour, (now() + timedelta(minutes=10)).hour)
self.assertEqual(build.estimated_finish_time.minute, (now() + timedelta(minutes=10)).minute)
@mock.patch('frigg.deployments.models.PRDeployment.start')
def test_initiate_deployment_with_specified_image(self, mock_deployment_start):
start_time = datetime(2012, 12, 12, tzinfo=get_current_timezone())
b1 = Build.objects.create(project=self.project, branch='master',
build_number=4, start_time=start_time)
deployment = b1.initiate_deployment({'image': 'frigg/super-image'})
self.assertEqual(deployment.image, 'frigg/super-image')
self.assertTrue(mock_deployment_start.called_once)
@mock.patch('frigg.deployments.models.PRDeployment.start')
def test_initiate_deployment_without_specified_image(self, mock_deployment_start):
start_time = datetime(2012, 12, 12, tzinfo=get_current_timezone())
b1 = Build.objects.create(project=self.project, branch='master',
build_number=4, start_time=start_time)
deployment = b1.initiate_deployment({})
self.assertEqual(deployment.image, settings.FRIGG_PREVIEW_IMAGE)
self.assertTrue(mock_deployment_start.called_once)
def test_delete_logs_should_remove_logs(self):
build = Build.objects.create(project=self.project, branch='master', build_number=4)
result = BuildResult.objects.create(
build=build,
setup_log=[{"item": "something"}],
service_log=[{"item": "something"}],
result_log=[{"item": "something"}],
after_log=[{"item": "something"}],
)
build.delete_logs()
result = BuildResult.objects.get(pk=result.pk)
self.assertEqual(result.setup_log, [])
self.assertEqual(result.service_tasks, [])
self.assertEqual(result.result_log, [])
self.assertEqual(result.after_tasks, [])
class BuildResultTestCase(TestCase):
def setUp(self):
self.project = Project.objects.create(owner='frigg', name='frigg-worker')
self.build = Build.objects.create(project=self.project, branch='master', build_number=1)
def test___str__(self):
result = BuildResult.objects.create(build=self.build)
self.assertEqual(str(result), 'frigg / frigg-worker / master #1')
def test_evaluate_results(self):
self.assertTrue(BuildResult.evaluate_results([{'succeeded': True}]))
self.assertTrue(BuildResult.evaluate_results([{'succeeded': True}, {}]))
self.assertFalse(BuildResult.evaluate_results([
{'succeeded': True},
{'succeeded': False}
]))
self.assertFalse(BuildResult.evaluate_results([
{'succeeded': False},
{'succeeded': True}
]))
self.assertFalse(BuildResult.evaluate_results([{'succeeded': False}, {}]))
def test_create_not_approved(self):
result = BuildResult.create_not_approved(self.build)
self.assertEqual(result.build_id, self.build.pk)
self.assertFalse(result.succeeded)
assert result.tasks[0]['error'] == 'This project is not approved.'
assert result.setup_tasks == []
assert result.service_tasks == []
def test_create_from_worker_payload(self):
BuildResult.create_from_worker_payload(self.build, {
'sha': 'superbhash',
'clone_url': 'https://github.com/frigg/frigg-worker.git',
'name': 'frigg-worker',
'branch': 'master',
'owner': 'frigg',
'worker_host': 'albus.frigg.io',
'finished': False,
'id': 1,
'results': [
{'task': 'make test', 'return_code': 0, 'succeeded': True, 'log': 'log'},
{'task': 'flake8', 'pending': True},
{'task': 'make test'}
],
'service_results': [
{'task': 'service postgresql start', 'return_code': 0, 'succeeded': True,
'log': 'log'},
],
'setup_results': [
{'task': 'make', 'return_code': 0, 'succeeded': True, 'log': 'log'},
],
'after_results': [
{'task': 'after', 'return_code': 0, 'succeeded': True, 'log': 'log'},
],
'webhooks': ['http://example.com']
})
assert self.build.result.worker_host == 'albus.frigg.io'
assert self.build.result.still_running
assert isinstance(self.build.result.tasks, list)
assert isinstance(self.build.result.setup_log, list)
assert isinstance(self.build.result.service_tasks, list)
assert isinstance(self.build.result.after_tasks, list)
def test_create_from_worker_payload_without_optional_results(self):
BuildResult.create_from_worker_payload(self.build, {
'sha': 'superbhash',
'clone_url': 'https://github.com/frigg/frigg-worker.git',
'name': 'frigg-worker',
'branch': 'master',
'owner': 'frigg',
'worker_host': 'albus.frigg.io',
'finished': False,
'id': 1,
'results': [
{'task': 'make test', 'return_code': 0, 'succeeded': True, 'log': 'log'},
{'task': 'flake8', 'pending': True},
{'task': 'make test'}
],
'webhooks': ['http://example.com']
})
assert isinstance(self.build.result.tasks, list)
assert isinstance(self.build.result.setup_log, list)
assert isinstance(self.build.result.service_tasks, list)
assert isinstance(self.build.result.after_tasks, list)
def test_tasks(self):
data = [
{'task': 'tox', 'log': '{}', 'return_code': 0},
{'task': 'tox', 'log': 'tested all the stuff\n1!"#$%&/()=?', 'return_code': 11},
{'task': 'tox', 'return_log': 'fail', 'return_code': 'd'}
]
result = BuildResult.objects.create(
build=self.build,
result_log=data
)
self.assertEqual(len(result.tasks), 3)
self.assertEqual(result.tasks, data)
def test_service_tasks(self):
data = [
{'task': 'tox', 'log': '{}', 'return_code': 0},
{'task': 'tox', 'log': 'tested all the stuff\n1!"#$%&/()=?', 'return_code': 11},
{'task': 'tox', 'return_log': 'fail', 'return_code': 'd'}
]
result = BuildResult.objects.create(
build=self.build,
service_log=data
)
self.assertEqual(len(result.service_tasks), 3)
self.assertEqual(result.service_tasks, data)
def test_setup_tasks(self):
data = [
{'task': 'tox', 'log': '{}', 'return_code': 0},
{'task': 'tox', 'log': 'tested all the stuff\n1!"#$%&/()=?', 'return_code': 11},
{'task': 'tox', 'return_log': 'fail', 'return_code': 'd'}
]
result = BuildResult.objects.create(
build=self.build,
setup_log=data
)
self.assertEqual(len(result.setup_tasks), 3)
self.assertEqual(result.setup_tasks, data)
def test_coverage_diff(self):
start_time = datetime(2012, 12, 12, tzinfo=get_current_timezone())
b1 = Build.objects.create(project=self.project, branch='i', build_number=4,
start_time=start_time)
positive_change = BuildResult.objects.create(build=b1, coverage=100)
self.assertEqual(positive_change.coverage_diff, 100)
master = Build.objects.create(project=self.project, branch='master', build_number=3,
end_time=start_time - timedelta(hours=1))
BuildResult.objects.create(build=master, coverage=20)
# Need to fetch again to come around cached_property
self.assertEqual(BuildResult.objects.get(pk=positive_change.pk).coverage_diff, 80)
b2 = Build.objects.create(project=self.project, branch='i', build_number=5,
start_time=start_time)
negative_change = BuildResult.objects.create(build=b2, coverage=10)
self.assertEqual(negative_change.coverage_diff, -10)
b3 = Build.objects.create(project=self.project, branch='i', build_number=6,
start_time=start_time)
no_change = BuildResult.objects.create(build=b3, coverage=20)
self.assertEqual(no_change.coverage_diff, 0)
| {
"content_hash": "ecb54e44e2926564950858e2ed012883",
"timestamp": "",
"source": "github",
"line_count": 616,
"max_line_length": 100,
"avg_line_length": 45.300324675324674,
"alnum_prop": 0.5979931911843755,
"repo_name": "frigg/frigg-hq",
"id": "7f9aad0bea49adc6197a2546cf9c1f1dcf825eb6",
"size": "27928",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/builds/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3504"
},
{
"name": "HTML",
"bytes": "8114"
},
{
"name": "JavaScript",
"bytes": "5982"
},
{
"name": "Makefile",
"bytes": "1164"
},
{
"name": "Python",
"bytes": "182545"
}
],
"symlink_target": ""
} |
"""Utility functions (file reading, simple IDL parsing by regexes) for IDL build.
Design doc: http://www.chromium.org/developers/design-documents/idl-build
"""
import os
import cPickle as pickle
import re
import string
import subprocess
KNOWN_COMPONENTS = frozenset(['core', 'modules'])
KNOWN_COMPONENTS_WITH_TESTING = frozenset(['core', 'modules', 'testing'])
def idl_filename_to_interface_name(idl_filename):
# interface name is the root of the basename: InterfaceName.idl
return os.path.splitext(os.path.basename(idl_filename))[0]
def idl_filename_to_component_with_known_components(idl_filename, known_components):
path = os.path.dirname(os.path.realpath(idl_filename))
while path:
dirname, basename = os.path.split(path)
if not basename:
break
if basename.lower() in known_components:
return basename.lower()
path = dirname
raise Exception('Unknown component type for %s' % idl_filename)
def idl_filename_to_component(idl_filename):
return idl_filename_to_component_with_known_components(idl_filename, KNOWN_COMPONENTS)
def is_testing_target(idl_filename):
component = idl_filename_to_component_with_known_components(idl_filename, KNOWN_COMPONENTS_WITH_TESTING)
return component == 'testing'
# See whether "component" can depend on "dependency" or not:
# Suppose that we have interface X and Y:
# - if X is a partial interface and Y is the original interface,
# use is_valid_component_dependency(X, Y).
# - if X implements Y, use is_valid_component_dependency(X, Y)
# Suppose that X is a cpp file and Y is a header file:
# - if X includes Y, use is_valid_component_dependency(X, Y)
def is_valid_component_dependency(component, dependency):
assert component in KNOWN_COMPONENTS
assert dependency in KNOWN_COMPONENTS
if component == 'core' and dependency == 'modules':
return False
return True
class ComponentInfoProvider(object):
"""Base class of information provider which provides component-specific
information.
"""
def __init__(self):
pass
@property
def interfaces_info(self):
return {}
@property
def component_info(self):
return {}
@property
def enumerations(self):
return {}
@property
def typedefs(self):
return {}
@property
def union_types(self):
return set()
@property
def include_path_for_union_types(self):
return None
class ComponentInfoProviderCore(ComponentInfoProvider):
def __init__(self, interfaces_info, component_info):
super(ComponentInfoProviderCore, self).__init__()
self._interfaces_info = interfaces_info
self._component_info = component_info
@property
def interfaces_info(self):
return self._interfaces_info
@property
def component_info(self):
return self._component_info
@property
def enumerations(self):
return self._component_info['enumerations']
@property
def typedefs(self):
return self._component_info['typedefs']
@property
def union_types(self):
return self._component_info['union_types']
@property
def include_path_for_union_types(self):
return 'bindings/core/v8/UnionTypesCore.h'
@property
def specifier_for_export(self):
return 'CORE_EXPORT '
@property
def include_path_for_export(self):
return 'core/CoreExport.h'
class ComponentInfoProviderModules(ComponentInfoProvider):
def __init__(self, interfaces_info, component_info_core,
component_info_modules):
super(ComponentInfoProviderModules, self).__init__()
self._interfaces_info = interfaces_info
self._component_info_core = component_info_core
self._component_info_modules = component_info_modules
@property
def interfaces_info(self):
return self._interfaces_info
@property
def component_info(self):
return self._component_info_modules
@property
def enumerations(self):
enums = self._component_info_core['enumerations'].copy()
enums.update(self._component_info_modules['enumerations'])
return enums
@property
def typedefs(self):
typedefs = self._component_info_core['typedefs'].copy()
typedefs.update(self._component_info_modules['typedefs'])
return typedefs
@property
def union_types(self):
# Remove duplicate union types from component_info_modules to avoid
# generating multiple container generation.
return self._component_info_modules['union_types'] - self._component_info_core['union_types']
@property
def include_path_for_union_types(self):
return 'bindings/modules/v8/UnionTypesModules.h'
@property
def specifier_for_export(self):
return 'MODULES_EXPORT '
@property
def include_path_for_export(self):
return 'modules/ModulesExport.h'
def load_interfaces_info_overall_pickle(info_dir):
with open(os.path.join(info_dir, 'modules', 'InterfacesInfoOverall.pickle')) as interface_info_file:
return pickle.load(interface_info_file)
def merge_dict_recursively(target, diff):
"""Merges two dicts into one.
|target| will be updated with |diff|. Part of |diff| may be re-used in
|target|.
"""
for key, value in diff.iteritems():
if key not in target:
target[key] = value
elif type(value) == dict:
merge_dict_recursively(target[key], value)
elif type(value) == list:
target[key].extend(value)
elif type(value) == set:
target[key].update(value)
else:
# Testing IDLs want to overwrite the values. Production code
# doesn't need any overwriting.
target[key] = value
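# Hypothetical sketch (not part of the original module): a small self-check
# illustrating how nested dicts are merged in place, lists are extended and
# sets are unioned by merge_dict_recursively.
def _merge_dict_recursively_example():
    target = {'a': {'x': [1]}, 's': {1}}
    diff = {'a': {'x': [2], 'y': 3}, 's': {2}}
    merge_dict_recursively(target, diff)
    assert target == {'a': {'x': [1, 2], 'y': 3}, 's': {1, 2}}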
def create_component_info_provider_core(info_dir):
interfaces_info = load_interfaces_info_overall_pickle(info_dir)
with open(os.path.join(info_dir, 'core', 'ComponentInfoCore.pickle')) as component_info_file:
component_info = pickle.load(component_info_file)
return ComponentInfoProviderCore(interfaces_info, component_info)
def create_component_info_provider_modules(info_dir):
interfaces_info = load_interfaces_info_overall_pickle(info_dir)
with open(os.path.join(info_dir, 'core', 'ComponentInfoCore.pickle')) as component_info_file:
component_info_core = pickle.load(component_info_file)
with open(os.path.join(info_dir, 'modules', 'ComponentInfoModules.pickle')) as component_info_file:
component_info_modules = pickle.load(component_info_file)
return ComponentInfoProviderModules(
interfaces_info, component_info_core, component_info_modules)
def create_component_info_provider(info_dir, component):
if component == 'core':
return create_component_info_provider_core(info_dir)
elif component == 'modules':
return create_component_info_provider_modules(info_dir)
else:
return ComponentInfoProvider()
################################################################################
# Basic file reading/writing
################################################################################
def get_file_contents(filename):
with open(filename) as f:
return f.read()
def read_file_to_list(filename):
"""Returns a list of (stripped) lines for a given filename."""
with open(filename) as f:
return [line.rstrip('\n') for line in f]
def resolve_cygpath(cygdrive_names):
if not cygdrive_names:
return []
cmd = ['cygpath', '-f', '-', '-wa']
process = subprocess.Popen(cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
idl_file_names = []
for file_name in cygdrive_names:
process.stdin.write('%s\n' % file_name)
process.stdin.flush()
idl_file_names.append(process.stdout.readline().rstrip())
process.stdin.close()
process.wait()
return idl_file_names
def read_idl_files_list_from_file(filename):
"""Similar to read_file_to_list, but also resolves cygpath."""
with open(filename) as input_file:
file_names = sorted([os.path.realpath(line.rstrip('\n'))
for line in input_file])
idl_file_names = [file_name for file_name in file_names
if not file_name.startswith('/cygdrive')]
cygdrive_names = [file_name for file_name in file_names
if file_name.startswith('/cygdrive')]
idl_file_names.extend(resolve_cygpath(cygdrive_names))
return idl_file_names
def read_pickle_files(pickle_filenames):
for pickle_filename in pickle_filenames:
with open(pickle_filename) as pickle_file:
yield pickle.load(pickle_file)
def write_file(new_text, destination_filename, only_if_changed):
if only_if_changed and os.path.isfile(destination_filename):
with open(destination_filename) as destination_file:
if destination_file.read() == new_text:
return
destination_dirname = os.path.dirname(destination_filename)
if not os.path.exists(destination_dirname):
os.makedirs(destination_dirname)
with open(destination_filename, 'w') as destination_file:
destination_file.write(new_text)
def write_pickle_file(pickle_filename, data, only_if_changed):
if only_if_changed and os.path.isfile(pickle_filename):
with open(pickle_filename) as pickle_file:
try:
if pickle.load(pickle_file) == data:
return
except Exception:
# If trouble unpickling, overwrite
pass
with open(pickle_filename, 'w') as pickle_file:
pickle.dump(data, pickle_file)
################################################################################
# IDL parsing
#
# We use regular expressions for parsing; this is incorrect (Web IDL is not a
# regular language), but simple and sufficient in practice.
# Leading and trailing context (e.g. a following '{') is used to avoid false matches.
################################################################################
def is_callback_interface_from_idl(file_contents):
match = re.search(r'callback\s+interface\s+\w+\s*{', file_contents)
return bool(match)
def should_generate_impl_file_from_idl(file_contents):
"""True when a given IDL file contents could generate .h/.cpp files."""
# FIXME: This would be error-prone and we should use AST rather than
# improving the regexp pattern.
match = re.search(r'(interface|dictionary|exception)\s+\w+', file_contents)
return bool(match)
def match_interface_extended_attributes_from_idl(file_contents):
# Strip comments
# re.compile needed b/c Python 2.6 doesn't support flags in re.sub
single_line_comment_re = re.compile(r'//.*$', flags=re.MULTILINE)
block_comment_re = re.compile(r'/\*.*?\*/', flags=re.MULTILINE | re.DOTALL)
file_contents = re.sub(single_line_comment_re, '', file_contents)
file_contents = re.sub(block_comment_re, '', file_contents)
match = re.search(r'\[(.*)\]\s*'
r'((callback|partial)\s+)?'
r'(interface|exception)\s+'
r'\w+\s*'
r'(:\s*\w+\s*)?'
r'{',
file_contents, flags=re.DOTALL)
return match
def get_interface_extended_attributes_from_idl(file_contents):
match = match_interface_extended_attributes_from_idl(file_contents)
if not match:
return {}
extended_attributes_string = match.group(1)
extended_attributes = {}
# FIXME: this splitting is WRONG: it fails on extended attributes where lists of
    # multiple values are used, which are separated by a comma and a space.
parts = [extended_attribute.strip()
for extended_attribute in re.split(',\s+', extended_attributes_string)
# Discard empty parts, which may exist due to trailing comma
if extended_attribute.strip()]
for part in parts:
name, _, value = map(string.strip, part.partition('='))
extended_attributes[name] = value
return extended_attributes
def get_interface_exposed_arguments(file_contents):
match = match_interface_extended_attributes_from_idl(file_contents)
if not match:
return None
extended_attributes_string = match.group(1)
match = re.search(r'[^=]\bExposed\(([^)]*)\)', file_contents)
if not match:
return None
arguments = []
for argument in map(string.strip, match.group(1).split(',')):
exposed, runtime_enabled = argument.split()
arguments.append({'exposed': exposed, 'runtime_enabled': runtime_enabled})
return arguments
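# Hypothetical sketch (not part of the original module): a minimal example of
# the regex-based extended-attribute extraction on an inline IDL fragment.
def _example_get_interface_extended_attributes():
    idl = '[Constructor, RuntimeEnabled=Foo] interface Widget { };'
    # Returns {'Constructor': '', 'RuntimeEnabled': 'Foo'}
    return get_interface_extended_attributes_from_idl(idl)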
| {
"content_hash": "20502e8e1837bcf7ac0bd9704c8caa03",
"timestamp": "",
"source": "github",
"line_count": 373,
"max_line_length": 108,
"avg_line_length": 34.48793565683646,
"alnum_prop": 0.6442786069651741,
"repo_name": "Pluto-tv/blink-crosswalk",
"id": "7c89a62c40a9aca51392148b2c07f397b7fadd8f",
"size": "13027",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "Source/bindings/scripts/utilities.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "ApacheConf",
"bytes": "1835"
},
{
"name": "Assembly",
"bytes": "14768"
},
{
"name": "Batchfile",
"bytes": "35"
},
{
"name": "C",
"bytes": "128002"
},
{
"name": "C++",
"bytes": "45337051"
},
{
"name": "CSS",
"bytes": "596289"
},
{
"name": "CoffeeScript",
"bytes": "163"
},
{
"name": "GLSL",
"bytes": "11578"
},
{
"name": "Groff",
"bytes": "28067"
},
{
"name": "HTML",
"bytes": "64824312"
},
{
"name": "Java",
"bytes": "109377"
},
{
"name": "JavaScript",
"bytes": "25099309"
},
{
"name": "Objective-C",
"bytes": "45096"
},
{
"name": "Objective-C++",
"bytes": "302371"
},
{
"name": "PHP",
"bytes": "220636"
},
{
"name": "Perl",
"bytes": "115958"
},
{
"name": "Python",
"bytes": "3879209"
},
{
"name": "Ruby",
"bytes": "73952"
},
{
"name": "Shell",
"bytes": "10282"
},
{
"name": "XSLT",
"bytes": "50203"
},
{
"name": "Yacc",
"bytes": "10148"
}
],
"symlink_target": ""
} |
"""
Python library for interacting with the T1 API. Uses third-party module Requests
(http://docs.python-requests.org/en/latest/) to get and post data, and ElementTree
to parse it.
"""
from __future__ import absolute_import
from .utils import filters
from .service import T1, T1Service
from . import errors
from .metadata import (__author__, __copyright__, __license__, __version__,
__maintainer__, __email__, __status__)
__all__ = ['T1', 'T1Service', 'filters', 'errors']
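# Hypothetical usage sketch (not part of the original file); the credentials
# and collection name below are placeholders only.
#
#     t1 = T1('username', 'password', 'api_key', auth_method='cookie')
#     advertisers = t1.get('advertisers')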
| {
"content_hash": "a5cffeb7cf0728dc505294580b74753e",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 82,
"avg_line_length": 35.57142857142857,
"alnum_prop": 0.6626506024096386,
"repo_name": "Cawb07/t1-python",
"id": "d3291d90c19ba42f76c708ed8f495be930f0e82b",
"size": "522",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "terminalone/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "177703"
}
],
"symlink_target": ""
} |
from django.db import models
from django import forms
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from sorl.thumbnail import ImageField as SorlImageField
from .utils.urls import is_absolute_url
import os
from uuid import uuid4
class SlugURLValidator(object):
message = _("Enter a valid 'slug' consisting of letters, numbers, underscores or hyphens.")
code = 'invalid'
def __init__(self):
pass
    def __call__(self, value):
        try:
            if not is_absolute_url(value):
                # Relative values must not contain '/': index() raises
                # ValueError (and the value is accepted) when no slash exists.
                value.index('/')
                raise ValidationError(self.message, code=self.code)
        except ValueError:
            pass
blocks_validator_slug = SlugURLValidator()
class SlugURLField(models.CharField):
default_validators = [blocks_validator_slug]
def validate(self, value, model_instance):
from .models import Menu
if isinstance(model_instance, Menu):
self.validators = []
if model_instance.type != Menu.TYPE_REDIRECT:
self.validators.append(blocks_validator_slug)
super(SlugURLField, self).validate(value, model_instance)
def to_python(self, value):
value = super(SlugURLField, self).to_python(value)
if value is None:
return value
if not is_absolute_url(value):
value = value.lower()
return value
class ImageField(SorlImageField):
#class ImageField(models.ImageField):
def __init__(self, verbose_name=None, name=None, upload_to=None, storage=None, **kwargs):
if not callable(upload_to):
upload_to = ImageField.path_and_rename(upload_to)
super(ImageField, self).__init__(verbose_name=verbose_name, name=name, upload_to=upload_to, storage=storage, **kwargs)
@staticmethod
def path_and_rename(path):
def wrapper(instance, filename):
ext = filename.split('.')[-1]
# set filename as random string
filename = '{}.{}'.format(uuid4().hex, ext)
# return the whole path to the file
return os.path.join('uploads', path, instance.__class__.__name__.lower(), filename)
return wrapper
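# Hypothetical sketch (not part of the original module): used on a model, the
# wrapper above stores uploads under
# 'uploads/<upload_to>/<model name>/<uuid hex>.<ext>', e.g. a 'photo.jpg' for
# the Article model below could be saved as 'uploads/covers/article/3f0e2a....jpg'.
#
#     class Article(models.Model):
#         cover = ImageField(upload_to='covers')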
class HiddenFormField(forms.IntegerField):
def __init__(self, *args, **kwargs):
kwargs['widget'] = forms.HiddenInput
super(HiddenFormField, self).__init__(*args, **kwargs)
class OrderField(models.PositiveSmallIntegerField):
def formfield(self, **kwargs):
defaults = {'form_class': HiddenFormField}
defaults.update(kwargs)
return super(OrderField, self).formfield(**defaults)
try:
from south.modelsinspector import add_introspection_rules
add_introspection_rules([], ["^blocks\.fields\.SlugURLField"])
add_introspection_rules([], ["^blocks\.fields\.ImageField"])
add_introspection_rules([], ["^blocks\.fields\.OrderField"])
except:
    # South is optional; ignore the import error when it is not installed.
    pass
| {
"content_hash": "575f3326d4d9ed6789f9ddd3d089af7c",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 120,
"avg_line_length": 28.87912087912088,
"alnum_prop": 0.7256468797564688,
"repo_name": "kimus/django-blocks",
"id": "18394bb0254d77cffc2246678c4d888fa0990c3a",
"size": "2628",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "blocks/fields.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2345"
},
{
"name": "JavaScript",
"bytes": "23810"
},
{
"name": "Python",
"bytes": "111560"
}
],
"symlink_target": ""
} |
"""WSGI application initialization for Nova APIs."""
import os
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_reports import guru_meditation_report as gmr
from oslo_reports import opts as gmr_opts
from oslo_service import _options as service_opts
from paste import deploy
from nova import config
from nova import context
from nova import exception
from nova import objects
from nova import service
from nova import utils
from nova import version
CONF = cfg.CONF
CONFIG_FILES = ['api-paste.ini', 'nova.conf']
LOG = logging.getLogger(__name__)
objects.register_all()
def _get_config_files(env=None):
if env is None:
env = os.environ
dirname = env.get('OS_NOVA_CONFIG_DIR', '/etc/nova').strip()
return [os.path.join(dirname, config_file)
for config_file in CONFIG_FILES]
def _setup_service(host, name):
try:
utils.raise_if_old_compute()
except exception.TooOldComputeService as e:
if CONF.workarounds.disable_compute_service_check_for_ffu:
LOG.warning(str(e))
else:
raise
binary = name if name.startswith('nova-') else "nova-%s" % name
ctxt = context.get_admin_context()
service_ref = objects.Service.get_by_host_and_binary(
ctxt, host, binary)
if service_ref:
service._update_service_ref(service_ref)
else:
try:
service_obj = objects.Service(ctxt)
service_obj.host = host
service_obj.binary = binary
service_obj.topic = None
service_obj.report_count = 0
service_obj.create()
except (exception.ServiceTopicExists,
exception.ServiceBinaryExists):
# If we race to create a record with a sibling, don't
# fail here.
pass
def error_application(exc, name):
# TODO(cdent): make this something other than a stub
def application(environ, start_response):
start_response('500 Internal Server Error', [
('Content-Type', 'text/plain; charset=UTF-8')])
return ['Out of date %s service %s\n' % (name, exc)]
return application
@utils.run_once('Global data already initialized, not re-initializing.',
LOG.info)
def init_global_data(conf_files, service_name):
# NOTE(melwitt): parse_args initializes logging and calls global rpc.init()
# and db_api.configure(). The db_api.configure() call does not initiate any
# connection to the database.
# NOTE(gibi): sys.argv is set by the wsgi runner e.g. uwsgi sets it based
# on the --pyargv parameter of the uwsgi binary
config.parse_args(sys.argv, default_config_files=conf_files)
logging.setup(CONF, "nova")
gmr_opts.set_defaults(CONF)
gmr.TextGuruMeditation.setup_autorun(
version, conf=CONF, service_name=service_name)
# dump conf at debug (log_options option comes from oslo.service)
# FIXME(mriedem): This is gross but we don't have a public hook into
# oslo.service to register these options, so we are doing it manually for
# now; remove this when we have a hook method into oslo.service.
CONF.register_opts(service_opts.service_opts)
if CONF.log_options:
CONF.log_opt_values(
logging.getLogger(__name__),
logging.DEBUG)
def init_application(name):
conf_files = _get_config_files()
# NOTE(melwitt): The init_application method can be called multiple times
# within a single python interpreter instance if any exception is raised
# during it (example: DBConnectionError while setting up the service) and
# apache/mod_wsgi reloads the init_application script. So, we initialize
# global data separately and decorate the method to run only once in a
# python interpreter instance.
init_global_data(conf_files, name)
try:
_setup_service(CONF.host, name)
except exception.ServiceTooOld as exc:
return error_application(exc, name)
# This global init is safe because if we got here, we already successfully
# set up the service and setting up the profile cannot fail.
service.setup_profiler(name, CONF.host)
conf = conf_files[0]
return deploy.loadapp('config:%s' % conf, name=name)
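# Hypothetical sketch (not part of the original module): a deployment's WSGI
# entry-point module typically builds the application once at import time; the
# service name 'osapi_compute' is an assumed example.
#
#     from nova.api.openstack import wsgi_app
#     application = wsgi_app.init_application('osapi_compute')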
| {
"content_hash": "a9bb7b47fe6f088d34075916eb9a2b33",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 79,
"avg_line_length": 33.60629921259842,
"alnum_prop": 0.6747891283973758,
"repo_name": "mahak/nova",
"id": "d60069ce844f91a3f68626e02873f7e74ce11622",
"size": "4840",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nova/api/openstack/wsgi_app.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "3545"
},
{
"name": "Mako",
"bytes": "1952"
},
{
"name": "Python",
"bytes": "23261880"
},
{
"name": "Shell",
"bytes": "28113"
},
{
"name": "Smarty",
"bytes": "507244"
}
],
"symlink_target": ""
} |
from indico_piwik.piwik import PiwikRequest
class PiwikQueryBase:
"""Base Piwik query"""
def __init__(self, query_script):
from indico_piwik.plugin import PiwikPlugin
self.request = PiwikRequest(server_url=PiwikPlugin.settings.get('server_api_url'),
site_id=PiwikPlugin.settings.get('site_id_events'),
api_token=PiwikPlugin.settings.get('server_token'),
query_script=query_script)
def call(self, **query_params):
return self.request.call(**query_params)
class PiwikQueryReportBase(PiwikQueryBase):
"""Base Piwik query to request reports"""
def __init__(self):
from indico_piwik.plugin import PiwikPlugin
super().__init__(query_script=PiwikPlugin.report_script)
def call(self, date=('last7',), period='day', **query_params):
date = ','.join(map(str, date))
return super().call(date=date, period=period, **query_params)
class PiwikQueryReportEventBase(PiwikQueryReportBase):
"""Base Piwik query to request reports of events and contributions"""
def __init__(self, event_id, start_date, end_date, contrib_id=None):
super().__init__()
self.event_id = event_id
self.contrib_id = contrib_id
self.start_date = start_date
self.end_date = end_date
def call(self, segmentation_enabled=True, **query_params):
if segmentation_enabled:
query_params['segment'] = self.get_segmentation()
return super().call(module='API', date=[self.start_date, self.end_date], **query_params)
def get_segmentation(self):
segmentation = {'customVariablePageName1': ('==', 'Conference'),
'customVariablePageValue1': ('==', self.event_id)}
if self.contrib_id:
segmentation['customVariablePageName2'] = ('==', 'Contribution')
segmentation['customVariablePageValue2'] = ('==', self.contrib_id)
segments = set()
for name, (equality, value) in segmentation.items():
segment = f'{name}{equality}{value}'
segments.add(segment)
return ';'.join(segments)
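# Self-contained sketch (not part of the module; event id 42 is made up) of the segment
# string get_segmentation() builds when no contribution filter is given: each entry is
# '<name><operator><value>' and the parts are joined with ';' (order may vary, since the
# method collects them in a set).
_example_filters = {'customVariablePageName1': ('==', 'Conference'),
                    'customVariablePageValue1': ('==', 42)}
_example_segment = ';'.join(f'{name}{equality}{value}'
                            for name, (equality, value) in _example_filters.items())
# -> 'customVariablePageName1==Conference;customVariablePageValue1==42'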
| {
"content_hash": "c02915ce0e53008e24a82ea2e73c0560",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 96,
"avg_line_length": 38.1551724137931,
"alnum_prop": 0.6095797559873475,
"repo_name": "indico/indico-plugins",
"id": "ce00289a41adb8350b03a1ffcfacee1b4d014b5e",
"size": "2456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "piwik/indico_piwik/queries/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4278"
},
{
"name": "HTML",
"bytes": "53511"
},
{
"name": "JavaScript",
"bytes": "19822"
},
{
"name": "Python",
"bytes": "469846"
},
{
"name": "SCSS",
"bytes": "2846"
},
{
"name": "Shell",
"bytes": "2926"
}
],
"symlink_target": ""
} |
import discord
from discord.ext import commands
from . import switch, wiiu_support, wiiu_results, ctr_support, ctr_results
class Results(commands.Cog):
"""
Parses game console result codes.
"""
def fetch(self, error):
if ctr_support.is_valid(error):
return ctr_support.get(error)
if ctr_results.is_valid(error):
return ctr_results.get(error)
if wiiu_support.is_valid(error):
return wiiu_support.get(error)
if wiiu_results.is_valid(error):
return wiiu_results.get(error)
if switch.is_valid(error):
return switch.get(error)
# Console name, module name, result, color
return None
def err2hex(self, error, suppress_error=False):
# If it's already hex, just return it.
if self.is_hex(error):
return error
# Only Switch is supported. The other two can only give nonsense results.
if switch.is_valid(error):
return switch.err2hex(error, suppress_error)
if not suppress_error:
return 'Invalid or unsupported error code format. \
Only Nintendo Switch XXXX-YYYY formatted error codes are supported.'
def hex2err(self, error, suppress_error=False):
# Don't bother processing anything if it's not hex.
if self.is_hex(error):
if switch.is_valid(error):
return switch.hex2err(error)
if not suppress_error:
return 'This isn\'t a hexadecimal value!'
def fixup_input(self, user_input):
# Truncate input to 16 chars so as not to create a huge embed or do
        # eventual regex on a huge string. If we add support for consoles
        # that have longer error codes, adjust accordingly.
user_input = user_input[:16]
# Fix up hex input if 0x was omitted. It's fine if it doesn't convert.
try:
user_input = hex(int(user_input, 16))
except ValueError:
pass
return user_input
def is_hex(self, user_input):
try:
user_input = hex(int(user_input, 16))
except ValueError:
return False
return True
def check_meme(self, err: str) -> str:
memes = {
'0xdeadbeef': 'you sure you want to eat that?',
'0xdeadbabe': 'i think you have bigger problems if that\'s the case',
'0x8badf00d': 'told you not to eat it'
}
return memes.get(err.casefold())
@commands.command(aliases=['err', 'res'])
async def result(self, ctx, err: str):
"""
Displays information on game console result codes, with a fancy embed.
0x prefix is not required for hex input.
Examples:
.err 0xD960D02B
.err D960D02B
.err 022-2634
.err 102-2804
.err 2168-0002
.err 2-ARVHA-0000
"""
err = self.fixup_input(err)
if (meme := self.check_meme(err)) is not None:
return await ctx.send(meme)
ret = self.fetch(err)
if ret:
embed = discord.Embed(title=ret.get_title())
if ret.extra_description:
embed.description = ret.extra_description
for field in ret:
embed.add_field(name=field.field_name, value=field.message, inline=False)
embed.color = ret.color
await ctx.send(embed=embed)
else:
await ctx.send(f'{ctx.author.mention}, the code you entered is \
invalid or is for a system I don\'t have support for.')
@commands.command(aliases=['serr'])
async def nxerr(self, ctx, err: str):
"""
Displays information on switch result codes, with a fancy embed.
0x prefix is not required for hex input.
Examples:
.nxerr 0x4A8
.nxerr 4A8
.nxerr 2168-0002
.nxerr 2-ARVHA-0000
"""
err = self.fixup_input(err)
if (meme := self.check_meme(err)) is not None:
return await ctx.send(meme)
ret = None
if switch.is_valid(err):
ret = switch.get(err)
if ret:
embed = discord.Embed(title=ret.get_title())
if ret.extra_description:
embed.description = ret.extra_description
for field in ret:
embed.add_field(name=field.field_name, value=field.message, inline=False)
embed.color = ret.color
await ctx.send(embed=embed)
else:
await ctx.send(f'{ctx.author.mention}, the code you entered is \
invalid for the switch.')
@commands.command(aliases=['3dserr'])
async def ctrerr(self, ctx, err: str):
"""
Displays information on 3DS result codes, with a fancy embed.
0x prefix is not required for hex input.
Examples:
.ctrerr 0xD960D02B
.ctrerr D960D02B
.ctrerr 022-2634
"""
err = self.fixup_input(err)
if (meme := self.check_meme(err)) is not None:
return await ctx.send(meme)
ret = None
if ctr_support.is_valid(err):
ret = ctr_support.get(err)
elif ctr_results.is_valid(err):
ret = ctr_results.get(err)
if ret:
embed = discord.Embed(title=ret.get_title())
if ret.extra_description:
embed.description = ret.extra_description
for field in ret:
embed.add_field(name=field.field_name, value=field.message, inline=False)
embed.color = ret.color
await ctx.send(embed=embed)
else:
await ctx.send(f'{ctx.author.mention}, the code you entered is \
invalid for the 3DS.')
@commands.command(aliases=['wiiuerr'])
async def cafeerr(self, ctx, err: str):
"""
Displays information on Wii U result codes, with a fancy embed.
0x prefix is not required for hex input.
Examples:
.cafeerr 0xC070FA80
.cafeerr C070FA80
.cafeerr 0x18106FFF
.cafeerr 18106FFF
.cafeerr 102-2804
"""
err = self.fixup_input(err)
if (meme := self.check_meme(err)) is not None:
return await ctx.send(meme)
ret = None
if wiiu_support.is_valid(err):
ret = wiiu_support.get(err)
elif wiiu_results.is_valid(err):
ret = wiiu_results.get(err)
if ret:
embed = discord.Embed(title=ret.get_title())
if ret.extra_description:
embed.description = ret.extra_description
for field in ret:
embed.add_field(name=field.field_name, value=field.message, inline=False)
embed.color = ret.color
await ctx.send(embed=embed)
else:
await ctx.send(f'{ctx.author.mention}, the code you entered is \
invalid for the Wii U.')
@commands.command(name='err2hex')
async def cmderr2hex(self, ctx, error: str):
"""
Converts a support code of a console to a hex result code.
Switch only supported.
3DS and WiiU support and result codes are not directly interchangeable.
"""
error = self.fixup_input(error)
await ctx.send(self.err2hex(error))
@commands.command(name='hex2err')
async def cmdhex2err(self, ctx, error: str):
"""
Converts a hex result code of a console to a support code.
Switch only supported.
3DS and WiiU support and result codes are not directly interchangeable.
"""
error = self.fixup_input(error)
await ctx.send(self.hex2err(error))
@commands.command()
async def hexinfo(self, ctx, error: str):
"""
Breaks down a 3DS result code into its components.
"""
error = self.fixup_input(error)
if self.is_hex(error):
if ctr_results.is_valid(error):
mod, desc, summary, level = ctr_results.hexinfo(error)
embed = discord.Embed(title="3DS hex result info")
embed.add_field(name="Module", value=mod, inline=False)
embed.add_field(name="Summary", value=summary, inline=False)
embed.add_field(name="Level", value=level, inline=False)
embed.add_field(name="Description", value=desc, inline=False)
await ctx.send(embed=embed)
else:
await ctx.send('This isn\'t a 3DS result code.')
else:
await ctx.send('This isn\'t a hexadecimal value!')
def setup(bot):
bot.add_cog(Results(bot))
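# Self-contained sketch (illustrative only, not part of the cog) of the hex normalisation
# performed by Results.fixup_input()/is_hex(): anything parseable in base 16 is rewritten
# with a 0x prefix, while console-style support codes pass through untouched.
def _normalise_sketch(user_input: str) -> str:
    user_input = user_input[:16]
    try:
        return hex(int(user_input, 16))   # '4A8' -> '0x4a8', 'D960D02B' -> '0xd960d02b'
    except ValueError:
        return user_input                 # '022-2634' stays as-is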
| {
"content_hash": "025cc2200cec708e126ce8fbe2d2dfdb",
"timestamp": "",
"source": "github",
"line_count": 260,
"max_line_length": 89,
"avg_line_length": 33.32692307692308,
"alnum_prop": 0.5779572994806693,
"repo_name": "ihaveamac/Kurisu",
"id": "b14c001e3de600d07ad43cd889404f94a6525365",
"size": "8665",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "cogs/results/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "234699"
}
],
"symlink_target": ""
} |
"""
Created on 10 October 2017
@author: Ashiv Dhondea
"""
import matplotlib.pyplot as plt
from matplotlib import rc
rc('font', **{'family': 'serif', 'serif': ['Helvetica']})
rc('text', usetex=True)
params = {'text.latex.preamble' : [r'\usepackage{amsmath}', r'\usepackage{amssymb}']}
plt.rcParams.update(params)
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
import pandas as pd
import numpy as np
import math
import AstroFunctions as AstFn
# ------------------------------------------------------------------------------------ #
dframe = pd.read_excel("MeerKAT64v36.wgs84.64x4_edited.xlsx",sheetname="Sheet1")
dframe = dframe.reset_index()
meerkat_id = dframe['ID'][0:64]
meerkat_lat = dframe['Lat'][0:64].astype(dtype=np.float64, copy=True)
meerkat_lon = dframe['Lon'][0:64].astype(dtype=np.float64, copy=True)
# -----------------------------------
altitude_meerkat = 1.038; # [km]
meerkat_ecef = np.zeros([64,3],dtype=np.float64);
baselines = np.zeros([64,64],dtype=np.float64);
for i in range(0,np.shape(meerkat_ecef)[0]):
meerkat_ecef[i,:] = AstFn.fnRadarSite(math.radians(meerkat_lat[i]),math.radians(meerkat_lon[i]),altitude_meerkat);
for i in range(63,0,-1):
for j in range(0,i,1):
baselines[i,j] = np.linalg.norm(np.subtract(meerkat_ecef[i,:],meerkat_ecef[j,:]))
#longest_baseline_indices = np.argmax(baselines);
longest_baseline_indices_unravel = np.unravel_index(baselines.argmax(), baselines.shape)
print longest_baseline_indices_unravel
longest_baseline = np.max(baselines)
print longest_baseline
print baselines[longest_baseline_indices_unravel[0],longest_baseline_indices_unravel[1]]
print baselines[60,48]
lim_lon_min = meerkat_lon.min();
lim_lon_max = meerkat_lon.max();
lim_lat_min = meerkat_lat.min();
lim_lat_max = meerkat_lat.max();
fig = plt.figure(1);
plt.rc('text', usetex=True)
plt.rc('font', family='serif');
map = Basemap(llcrnrlon=lim_lon_min-0.005,llcrnrlat=lim_lat_min-0.01,urcrnrlon=lim_lon_max+0.005,urcrnrlat=lim_lat_max+0.01,resolution='f', projection='cass', lat_0 = 0.0, lon_0 = 0.0) # see http://boundingbox.klokantech.com/
#map.drawmapboundary(fill_color='aqua')
#map.fillcontinents(color='coral',lake_color='aqua')
map.drawmapboundary(fill_color='lightblue')
map.fillcontinents(color='beige',lake_color='lightblue')
parallels = np.arange(-81.,0.,0.02)
# labels = [left,right,top,bottom]
map.drawparallels(parallels,labels=[False,True,False,False],labelstyle='+/-',linewidth=0.2)
meridians = np.arange(10.,351.,0.02)
map.drawmeridians(meridians,labels=[True,False,False,True],labelstyle='+/-',linewidth=0.2)
for i in range(64):
x,y = map(meerkat_lon[i],meerkat_lat[i]);
map.plot(x,y,marker='o',markersize=3,color='blue');
for i in range(48,64,1):
x,y = map(meerkat_lon[i],meerkat_lat[i]);
plt.text(x, y, r"\textbf{%s}" %meerkat_id[i],fontsize=6,color='navy')
# plt.annotate(r"\textbf{%s}" %meerkat_id[i],xy = (x,y),color='navy')
plt.title(r'\textbf{Location of MeerKAT dishes}', fontsize=12);
fig.savefig('main_xxx_meerkat_layout.pdf',bbox_inches='tight',pad_inches=0.08,dpi=10);
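# quick self-contained check (values made up) of the argmax + unravel_index idiom used above:
# argmax() on the flattened matrix followed by unravel_index() recovers the (row, col) pair
# of the largest entry, i.e. the two dishes separated by the longest baseline.
demo_baselines = np.array([[0.0, 1.2, 0.4],
                           [0.0, 0.0, 3.7],
                           [0.0, 0.0, 0.0]]);
demo_pair = np.unravel_index(demo_baselines.argmax(), demo_baselines.shape); # (1, 2)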
| {
"content_hash": "357c16b8df0db39294a0b7dceedd8bc8",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 225,
"avg_line_length": 37.94047619047619,
"alnum_prop": 0.6655161593975526,
"repo_name": "AshivDhondea/SORADSIM",
"id": "d82c4824af3e15e4361bd473db883504c625cb44",
"size": "3212",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "miscellaneous/main_xxx_meerkat_layout.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "1189967"
},
{
"name": "Python",
"bytes": "484131"
}
],
"symlink_target": ""
} |
import nose
import nose.config
import sys
from nose.plugins.manager import DefaultPluginManager
c = nose.config.Config()
c.plugins=DefaultPluginManager()
c.srcDirs = ['package']
if not nose.run(config=c):
sys.exit(1)
| {
"content_hash": "f8ccf00ea1a479cf935c80c2c4799f85",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 53,
"avg_line_length": 24.555555555555557,
"alnum_prop": 0.7692307692307693,
"repo_name": "QualiSystems/Azure-Shell",
"id": "4603b5825375bef098ce51681e7fc427691dbcbc",
"size": "221",
"binary": false,
"copies": "5",
"ref": "refs/heads/develop",
"path": "runtests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "906"
},
{
"name": "Python",
"bytes": "654779"
},
{
"name": "Shell",
"bytes": "616"
}
],
"symlink_target": ""
} |
from page_get.basic import get_page
from decorators.decorator import parse_decorator
import json
def get_followers_list_return(uid, page):
followers_wb_temp_url = 'https://m.weibo.cn/api/container/getIndex?containerid={}_-_followers_-_{}&luicode={}&lfid={}&featurecode={}&type=uid&value={}&page={}'
containerid = '231051'
luicode = '10000011'
lfid = '100505' + str(uid)
featurecode = '20000320'
value = str(uid)
url = followers_wb_temp_url.format(containerid, uid, luicode, lfid, featurecode, value, page)
html = get_page(url, user_verify=False, need_login=False)
return html
@parse_decorator(3)
def parse_json_to_dict(html):
cont = json.loads(html, encoding='utf-8')
return cont
# If a given page happens to contain no pictures, that should not be treated as an error (None)
@parse_decorator(5)
def parse_dict_to_followers_list(wb_dict):
weibo_pic_list = []
cards = wb_dict['cards']
if cards:
for card in cards:
if 'title' in card:
                # if card['title'] == 'ไป–็š„ๅ…จ้ƒจๅ…ณๆณจ':  ("all of his follows")
                return 'yes!'
                # if card['title'] == 'ไป–็š„ๅ…จ้ƒจๅ…ณๆณจ' or card['title'] == 'ๅฅน็š„ๅ…จ้ƒจๅ…ณๆณจ':  ("all of his/her follows")
# one_wb_pic_list = mblog_to_db_handler(card['mblog'])
# if one_wb_pic_list:
# weibo_pic_list.extend(one_wb_pic_list)
# return weibo_pic_list
return 'no!'
def get_followers(uid):
page = 1
html = get_followers_list_return(uid, page)
json_dict = parse_json_to_dict(html)
follower_list = parse_dict_to_followers_list(json_dict)
return follower_list
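# Self-contained sketch (uid and page are made up) of the request URL that
# get_followers_list_return() assembles from the template above.
demo_uid, demo_page = '1234567890', 1
demo_url = ('https://m.weibo.cn/api/container/getIndex?'
            'containerid=231051_-_followers_-_{uid}&luicode=10000011&lfid=100505{uid}'
            '&featurecode=20000320&type=uid&value={uid}&page={page}').format(uid=demo_uid, page=demo_page)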
| {
"content_hash": "684f173ad982360735506278618ae086",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 160,
"avg_line_length": 31.408163265306122,
"alnum_prop": 0.6185834957764782,
"repo_name": "KingOfBanana/SocialNetworkAI",
"id": "5fd328de0adac4465d7ad45c404c612df65d17d4",
"size": "1652",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "page_parse/followers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2240072"
},
{
"name": "Shell",
"bytes": "623"
}
],
"symlink_target": ""
} |
''' Dummy NFC Provider to be used on desktops in case no other provider is found
'''
from . import NFCBase
from kivy.clock import Clock
from kivy.logger import Logger
from kivy.app import App
class ScannerDummy(NFCBase):
'''This is the dummy interface that gets selected in case any other
hardware interface to NFC is not available.
'''
_initialised = False
name = 'NFCDummy'
def nfc_init(self):
# print 'nfc_init()'
Logger.debug('NFC: configure nfc')
self._initialised = True
self.nfc_enable()
return True
def on_new_intent(self, dt):
        tag_info = {'type': 'dummy',
'message': 'dummy',
'extra details': None}
# let Main app know that a tag has been detected
app = App.get_running_app()
app.tag_discovered(tag_info)
app.show_info('New tag detected.', duration=2)
Logger.debug('NFC: got new dummy tag')
def nfc_enable(self):
Logger.debug('NFC: enable')
if self._initialised:
Clock.schedule_interval(self.on_new_intent, 22)
def nfc_disable(self):
        # print 'nfc_disable()'
Clock.unschedule(self.on_new_intent)
def nfc_enable_exchange(self, data):
''' Start sending data
'''
Logger.debug('NFC: sending data {}'.format(data))
def nfc_disable_exchange(self):
''' Disable/Stop ndef exchange
'''
Logger.debug('NFC: disable nfc exchange')
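# Minimal usage sketch (an assumption about how a provider is driven; in the real app a
# running kivy App supplies the event loop and the tag_discovered/show_info callbacks that
# on_new_intent relies on).
if __name__ == '__main__':
    scanner = ScannerDummy()
    scanner.nfc_init()      # schedules on_new_intent every 22 seconds
    scanner.nfc_disable()   # unschedules it again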
| {
"content_hash": "2bb9b41de14902b3e508b98930b3f9e9",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 80,
"avg_line_length": 28.384615384615383,
"alnum_prop": 0.5962059620596206,
"repo_name": "asfin/electrum",
"id": "a0d3e2643ecfa1bb0cf1b96d1eb57ee4b32e4ba9",
"size": "1476",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "electrum/gui/kivy/nfc_scanner/scanner_dummy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1118"
},
{
"name": "GLSL",
"bytes": "289"
},
{
"name": "Java",
"bytes": "1574"
},
{
"name": "Makefile",
"bytes": "874"
},
{
"name": "NSIS",
"bytes": "7316"
},
{
"name": "Python",
"bytes": "2187670"
},
{
"name": "Shell",
"bytes": "21268"
}
],
"symlink_target": ""
} |
"""
Utility functions for the random forest classifier.
@copyright: The Broad Institute of MIT and Harvard 2015
"""
import numpy as np
import pandas as pd
import pickle
"""Return a function that gives a prediction from a design matrix row
"""
def gen_predictor(params_filename="./models/test/scikit_randf-params"):
clf = pickle.load(open(params_filename, "rb" ) )
def predictor(X):
scores = clf.predict_proba(X)
probs = [x[1] for x in scores]
return probs
    return predictor
| {
"content_hash": "64abc86f6ae3f8c3210c446d370c09b0",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 71,
"avg_line_length": 24.428571428571427,
"alnum_prop": 0.6939571150097466,
"repo_name": "broadinstitute/ebola-predictor",
"id": "fddaf290dfd65572262aecbb7f977fbe9ce7b774",
"size": "513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scikit_randf/utils.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "273299"
},
{
"name": "R",
"bytes": "3987"
}
],
"symlink_target": ""
} |
import os
# import sys
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u"dtool-create"
copyright = u"2017, Tjelvar Olsson"
author = u"Tjelvar Olsson"
repo_name = u"dtool-create"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u"0.23.4"
# The full version, including alpha/beta/rc tags.
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = []
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'default'
# Set the readthedocs theme.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
print('using readthedocs theme...')
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# otherwise, readthedocs.org uses their theme by default, so no need to specify
# it
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = '{}doc'.format(repo_name)
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, '{}.tex'.format(repo_name),
u'{} Documentation'.format(repo_name),
author, 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, repo_name, u'{} Documentation'.format(repo_name),
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, repo_name, u'{} Documentation'.format(repo_name),
author, repo_name, u'Dtool plugin for creating datasets and collections',
'Miscellaneous'),
]
| {
"content_hash": "d08d30d9bcd102d6e6337cb6c6972880",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 79,
"avg_line_length": 30.576158940397352,
"alnum_prop": 0.6647173489278753,
"repo_name": "jic-dtool/dtool-create",
"id": "13b3a695694e95a60da400ead0e0379607f5e93c",
"size": "5165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29926"
}
],
"symlink_target": ""
} |
import sys
import time
from websocket import create_connection
from json import *
if(len(sys.argv)< 2):
print "test requires sitename: python testWebSocketOperations_server.py <url>"
sys.exit(2)
site = sys.argv[1]
ws = create_connection("ws://"+site)
#----------------------------------------------------------------------------------------------------
def runTests():
test_serverVersion();
test_getDataFrame();
test_getUserID();
test_getDataSetNames();
test_getGeneSets();
# test_getSampleCategorizations();
test_getManifest();
test_specifyCurrentDataset()
test_getPatientHistoryTable()
test_survivalCurves()
#userDataStoreTests() # need environment variable, lost within emacs/ess
test_getPatientHistoryDxAndSurvivalMinMax()
test_getDrugGeneInteractions()
# TODO: recent changes to DEMOdz include expression matrices far too large, breaking these
# TODO: data-specific tests. fix this! (pshannon 14aug2015)
test_getMarkersNetwork()
test_getPathway();
#test_pca() # contains 3 more granular tests
#test_plsr() # contains 3 more granular tests
print "OK: all python websocket json tests passed"
#----------------------------------------------------------------------------------------------------
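# every test below repeats the same round trip; this small helper (a sketch added for
# illustration, not used by the original tests) makes the request/response protocol explicit
def sendRequest(cmd, payload, callback=""):
    msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
    ws.send(msg)
    return loads(ws.recv())
#----------------------------------------------------------------------------------------------------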
def userDataStoreTests():
# these are all defined in pkg/R/wsUserDataStore.R
# environment variable ONCOSCAPE_USER_DATA_STORE must point to, eg,
# export ONCOSCAPE_USER_DATA_STORE=file:///Users/pshannon/oncoUserData
test_initUserDataStore()
test_getUserDataStoreSummary()
test_addDataToUserDataStore_1_item()
test_getDataItem()
test_deleteDataStoreItem()
#----------------------------------------------------------------------------------------------------
def test_ping():
"sends the 'ping' command, payload ignored, expects the current date in return"
print "--- test_ping"
payload = "datasets/wsJsonTests/test.py"
msg = dumps({"cmd": "ping", "status":"request", "callback":"", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
payload2 = result["payload"]
assert(payload2.find(payload) >= 0)
assert(len(payload2) > len(payload))
#------------------------------------------------------------------------------------------------------------------------
def test_serverVersion():
"sends the 'serverVersion' command, payload ignored, expects current x.y.z R package version in return"
print "--- test_serverVersion"
msg = dumps({"cmd": "getServerVersion", "status":"request", "callback":"", "payload": ""})
ws.send(msg)
result = loads(ws.recv())
version = result["payload"]
assert version.index("1.4") == 0
#------------------------------------------------------------------------------------------------------------------------
def test_getDataFrame():
"sends the 'getDataFrame' command, payload ignored, expects a small mixed-type json-encoding in return"
print "--- test_getDataFrame"
msg = dumps({"cmd": "getSampleDataFrame", "status":"request", "callback":"", "payload": ""})
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"]
# the dataframe has been transformed in R to a matrix of type character
# columnames are shipped separately in the payload
# the contents of the matrix come as a list of lists, one row per list
assert payload.keys() == ["colnames", "tbl"]
columnNames = payload["colnames"]
assert columnNames == ['integers', 'strings', 'floats']
tbl = payload["tbl"]
assert tbl == [['1', 'ABC', '3.140'], ['2', 'def', '2.718']]
#------------------------------------------------------------------------------------------------------------------------
def test_getUserID():
"get the current value"
print "--- test_getUserId"
payload = "";
msg = dumps({"cmd": "getUserId", "status": "request", "callback": "", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
# in test mode, which is enforced by runWsTestOnco.R assignments in the current directory
# userID <- "[email protected]"
# current.datasets <- c("DEMOdz;TCGAgbm")
assert(result["payload"] == "[email protected]");
#------------------------------------------------------------------------------------------------------------------------
def test_getDataSetNames():
"get a list of the names"
print "--- test_getDataSetNames"
payload = "";
msg = dumps({"cmd": "getDataSetNames", "status": "request", "callback": "", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
# in test mode, which is enforced by runDevel.R assignments in the parent directory
# userID <- "[email protected]"
# current.datasets <- c("DEMOdz;TCGAgbm")
# we expect only DEMOdz
payload = result["payload"]
assert(result["payload"]["datasets"] == ["DEMOdz","TCGAgbm"])
assert(result["payload"]["passwordProtected"] == False)
#------------------------------------------------------------------------------------------------------------------------
def test_getManifest():
"get the full data.frame for DEMOdz"
print "--- test_getManifest"
payload = "DEMOdz";
msg = dumps({"cmd": "getDataManifest", "status": "request", "callback": "", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"]
fieldNames = payload.keys()
fieldNames.sort()
assert fieldNames == ["colnames", "datasetName", "mtx", "rownames"]
# in test mode, which is enforced by runDevel.R assignments in the parent directory
# userID <- "[email protected]"
# current.datasets <- c("DEMOdz")
# we expect only DEMOdz
colnames = payload["colnames"]
assert(len(colnames) == 9)
assert(colnames[0:3] == ["category", "subcategory", "rows"])
    # the matrix (it's all strings right now) comes across the wire
    # as a list of lists, which in javascript will appear as an array of arrays
mtx = payload["mtx"]
assert type(mtx) is list
assert type(mtx[0]) is list
assert len(mtx) >= 9
assert len(mtx[0]) == 9
# each row is actually a row
#assert mtx[0][0:4] == [u'mRNA expression', u'Z scores', u' 20', u' 64']
#------------------------------------------------------------------------------------------------------------------------
def test_histogramCoordinatesIntentionalError():
"demonstrate error tryCatch, returning explanatory standard json message"
print "--- test_histogramCoordinatesIntentionalError"
dataset = "DEMOdz";
dataItem = "mtx.mRNA";
cmd = "calculateHistogramCoordinates"
callback = "handleHistogramCoordinates"
# elicit a representative error, make sure it is trapped and returned as a bona fide json message
# with enough error detail for us to figure out
payload = "intentional error"
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
# we expect this reply
# {cmd: 'handleHistogramCoordinates'
# status: 'error',
# callback: '',
# payload: 'OncoDev13 (version 1.3.8) exception!
# Error in payload$dataset: $ operator is invalid for atomic vectors\n.
# incoming msg: request; handleHistogramCoordinates; calculateHistogramCoordinates; intentional error'}
assert(result["status"] == "error")
assert(result["payload"].find("exception!") >= 0)
assert(result["payload"].find("(version") >= 0)
assert(result["payload"].find(callback) >= 0)
assert(result["payload"].find(payload) >= 0)
# testHistogramCoordinatesIntentionalError
#------------------------------------------------------------------------------------------------------------------------
def test_histogramCoordinatesDEMOdz_mrna():
"demonstrate error tryCatch, returning explanatory standard json message"
print "--- test_histogramCoordinatesDEMOdz_mrna"
dataset = "DEMOdz";
dataItem = "mtx.mrna";
cmd = "calculateHistogramCoordinates"
callback = "handleHistogramCoordinates"
payload = {"dataset": dataset, "dataItem": dataItem}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"]
breaks = payload["breaks"]
counts = payload["counts"]
mids = payload["mids"]
assert(breaks == [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 6])
assert(counts == [1, 2, 25, 196, 421, 409, 181, 38, 5, 1, 1])
assert(mids == [-4.5, -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5])
# testHistogramCoordinatesDEMOdz_mrna
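#------------------------------------------------------------------------------------------------------------------------
# structural sketch (illustrative only, not called by runTests) of the histogram payload
# convention asserted above: R's hist() returns one more break than counts, and the mids
# are the midpoints of successive breaks
def check_histogramShape(breaks, counts, mids):
    assert len(breaks) == len(counts) + 1
    assert len(mids) == len(counts)
    assert all(abs(m - (lo + hi) / 2.0) < 1e-9 for (m, lo, hi) in zip(mids, breaks[:-1], breaks[1:]))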
#------------------------------------------------------------------------------------------------------------------------
def test_specifyCurrentDataset():
"set current dataset, with legal value, with a nonsensical one"
print "--- test_specifyCurrentDataset"
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "DEMOdz";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"]
assert(payload.keys() == ['datasetName', 'mtx', 'rownames', 'colnames'])
assert(payload["rownames"][0:2] == ['mtx.mrna.ueArray.RData', 'mtx.mrna.bc.RData'])
assert(result["cmd"] == callback)
# set another legitimate dataset
dataset = "TCGAgbm";
payload = dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert(result["payload"]["datasetName"] == dataset);
assert(result["cmd"] == callback)
# now one which should fail
dataset = "hocus-pocus";
payload = dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "error"
assert result["payload"].find("exception!") >= 0
assert result["payload"].find("not TRUE") >= 0
#------------------------------------------------------------------------------------------------------------------------
def test_specifyCurrentDataset():
"set current dataset, with legal value, with a nonsensical one"
print "--- test_specifyCurrentDataset"
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "DEMOdz";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"]
fields = payload.keys()
assert fields == ["datasetName", "mtx", "rownames", "colnames"];
assert payload["datasetName"] == "DEMOdz"
mtx = payload["mtx"]
colnames = payload["colnames"]
rownames = payload["rownames"]
datasetName = payload["datasetName"]
assert datasetName == "DEMOdz"
assert colnames == ['category', 'subcategory', 'rows', 'cols', 'row type', 'column type', 'minValue', 'maxValue', 'provenance']
# we might get more rows in the manifest matrix than the 9 we now have, but the column count should be pretty constant
assert len(mtx[0]) == 9
# check just one row's name
assert(rownames.index("mtx.mut.RData") >= 0)
#------------------------------------------------------------------------------------------------------------------------
def test_getPatientHistoryTable():
"set current dataset, ask for patient history"
print "--- test_getPatientHistoryTable"
#------------------------------------------------------------
# first specify currentDataSet
#------------------------------------------------------------
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "DEMOdz";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["payload"]["datasetName"] == dataset;
assert result["cmd"] == callback
assert result["status"] == "success"
#------------------------------------------------------------------------------------------
# ask for the history table. duration formats (Survival, AgeDx, TimeFirstProgression)
# default to the native storage type, "byDay"
#------------------------------------------------------------------------------------------
cmd = "getPatientHistoryTable"
callback = "displayPatientHistory"
payload = "" # equivalent to {"durationFormat": "byDay"} - the default
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
fields = result.keys()
fields.sort()
assert fields == ["callback", "cmd", "payload", "status"]
assert result["status"] == "success"
assert result["callback"] == ""
assert result["cmd"], "displayPatientHistory"
payload = result["payload"]
assert payload.keys() == ["colnames", "tbl"]
columnNames = payload["colnames"]
# current column names will likely change. but for now:
assert columnNames == ['ptID', 'study', 'Birth.gender', 'Survival', 'AgeDx',
'TimeFirstProgression', 'Status.status']
tbl = payload["tbl"]
assert len(tbl) == 20 # 20 rows in the DEMOdz patient history table
assert len(tbl[0]) == 7 # 7 columns extracted from possibly many more
# this will become a dataPackage-specific list
# todo: find out where the whitespace comes from,
# todo: e.g. ' 9369'
assert(tbl[0][3].strip() == "2512")
assert(tbl[0][4].strip() == "9369")
assert(tbl[0][5].strip() == "2243")
#------------------------------------------------------------------------------------------
# ask for the history table, now with duration format "byYear"
#------------------------------------------------------------------------------------------
cmd = "getPatientHistoryTable"
callback = "displayPatientHistory"
payload = {"durationFormat": "byYear"}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
fields = result.keys()
fields.sort()
assert fields == ["callback", "cmd", "payload", "status"]
assert result["status"] == "success"
assert result["callback"] == ""
assert result["cmd"], "displayPatientHistory"
payload = result["payload"]
assert payload.keys() == ["colnames", "tbl"]
columnNames = payload["colnames"]
# current column names will likely change. but for now:
assert columnNames == ['ptID', 'study', 'Birth.gender', 'Survival', 'AgeDx',
'TimeFirstProgression', 'Status.status']
tbl = payload["tbl"]
assert len(tbl) == 20 # 20 rows in the DEMOdz patient history table
assert len(tbl[0]) == 7 # 7 columns extracted from possibly many more
# this will become a dataPackage-specific list
assert(tbl[0][3] == "6.877")
assert(tbl[0][4] == "25.651")
assert(tbl[0][5] == "6.141")
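#----------------------------------------------------------------------------------------------------
# sanity-check sketch of the byDay -> byYear conversion asserted above (assumption: the
# server divides the day counts by 365.25; the expected strings match that to 3 decimals)
def check_durationFormatConversion():
    assert "%.3f" % (2512 / 365.25) == "6.877"
    assert "%.3f" % (9369 / 365.25) == "25.651"
    assert "%.3f" % (2243 / 365.25) == "6.141"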
#----------------------------------------------------------------------------------------------------
def test_getPatientHistoryDxAndSurvivalMinMax():
print "--- test_getPatientHistoryDxAndSurvivalMinMax"
#------------------------------------------------------------
# first specify currentDataSet
#------------------------------------------------------------
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "TCGAgbm";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["payload"]["datasetName"] == dataset;
assert result["cmd"] == callback
assert result["status"] == "success"
#------------------------------------------------------------
# now ask for ageAtDx and survival mins and maxes
#------------------------------------------------------------
cmd = "getPatientHistoryDxAndSurvivalMinMax"
callback = "handleAgeAtDxAndSurvivalRanges"
status = "request"
payload = ""
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["cmd"] == callback
assert result["status"] == "success"
assert(result["payload"].keys() == ['ageAtDxLow', 'ageAtDxHigh', 'survivalHigh', 'survivalLow'])
assert(result["payload"]["ageAtDxLow"] == 3982)
assert(result["payload"]["ageAtDxHigh"] == 32612)
assert(result["payload"]["survivalLow"] == 3)
assert(result["payload"]["survivalHigh"] == 3881)
#----------------------------------------------------------------------------------------------------
# we typically find "history.RData", a list of lists, in a data package. but this caisis-derived
# data is not always available. the alternative is a tsv file (data.frame) hand-crafted from
# relevant patient or sample history. client code will supply the name, as this test demonstrates.
def test_getPrebuiltPatientHistoryTable():
"set current dataset, ask for prebuilt patient history"
print "--- test_getPrebuiltPatientHistoryTable"
#------------------------------------------------------------
# first specify currentDataSet
#------------------------------------------------------------
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "DEMOdz";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["payload"]["datasetName"] == dataset;
assert result["cmd"] == callback
assert result["status"] == "success"
#------------------------------------------------------------
# now ask for the history table
#------------------------------------------------------------
cmd = "getPatientHistoryTable"
callback = "displayPatientHistory"
payload = ""
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
fields = result.keys()
fields.sort()
assert fields == ["callback", "cmd", "payload", "status"]
assert result["status"] == "success"
assert result["callback"] == ""
assert result["cmd"], "displayPatientHistory"
payload = result["payload"]
assert payload.keys() == ["colnames", "tbl"]
columnNames = payload["colnames"]
# current column names will likely change. but for now:
assert columnNames == ['ptID', 'study', 'Birth.gender', 'Survival', 'AgeDx',
'TimeFirstProgression', 'Status.status']
tbl = payload["tbl"]
assert len(tbl) == 20 # 20 rows in the DEMOdz patient history table
assert len(tbl[0]) == 7 # will evolve to be configurable for each data package
assert(tbl[0][3] == "2512")
assert(tbl[0][4] == "25.651")
assert(tbl[0][5] == "6.141")
#----------------------------------------------------------------------------------------------------
def test_getGeneSets():
"set current dataset, ask for geneset names, then the genes in one set"
print "--- test_getGeneSets"
#------------------------------------------------------------
# first specify currentDataSet
#------------------------------------------------------------
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "DEMOdz";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["payload"]["datasetName"] == dataset;
assert result["cmd"] == callback
assert result["status"] == "success"
#------------------------------------------------------------
# now ask for the geneset names
#------------------------------------------------------------
cmd = "getGeneSetNames"
callback = "handleGeneSetNames"
payload = ""
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
fields = result.keys()
fields.sort()
assert fields == ["callback", "cmd", "payload", "status"]
assert result["status"] == "success"
assert result["callback"] == ""
assert result["cmd"], callback
payload = result["payload"]
payload.sort()
assert payload == ["random.24", "random.40", "test4"]
cmd = "getGeneSetGenes"
payload = "random.24"
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
genes = result["payload"]
genes.sort()
assert len(genes) == 24
assert genes[0:5] == ['EFEMP2', 'ELAVL1', 'ELAVL2', 'ELL', 'ELN']
#----------------------------------------------------------------------------------------------------
def test_getSampleCategorizations():
"set current dataset, ask for the names of the categorizations, then the actual datat"
print "--- test_getSampleCategorizations"
payload = "";
msg = dumps({"cmd": "getDataSetNames", "status": "request", "callback": "", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert(result["payload"]["datasets"].index("DEMOdz") >= 0)
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "DEMOdz";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["payload"]["datasetName"] == dataset;
assert result["cmd"] == callback
assert result["status"] == "success"
#------------------------------------------------------------
# now ask for the sample categorization names
#------------------------------------------------------------
cmd = "getSampleCategorizationNames"
callback = "handleSampleCategorizationNames"
payload = ""
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert(result["payload"].index("tbl.glioma8") >= 0)
assert(result["payload"].index("tbl.verhaakPlus1") >= 0)
#------------------------------------------------------------
# now ask for the sample categorization data
#------------------------------------------------------------
cmd = "getSampleCategorization"
payload = "tbl.verhaakPlus1"
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
colnames = result["payload"]["colnames"]
rownames = result["payload"]["rownames"]
tbl = result["payload"]["tbl"]
assert(colnames == [u'cluster', u'color'])
assert(len(rownames) == 548)
assert(len(tbl) == 548)
assert(len(tbl[0]) == 2)
payload = "tbl.glioma8"
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
colnames = result["payload"]["colnames"]
rownames = result["payload"]["rownames"]
tbl = result["payload"]["tbl"]
assert(colnames == [u'cluster', u'color'])
assert(len(rownames) == 704)
assert(len(tbl) == 704)
assert(len(tbl[0]) == 2)
#----------------------------------------------------------------------------------------------------
def test_getMarkersNetwork():
"set current dataset, ask for markers network"
print "--- test_getMarkersNetwork"
#------------------------------------------------------------
# first specify currentDataSet
#------------------------------------------------------------
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "DEMOdz";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["payload"]["datasetName"] == dataset;
assert result["cmd"] == callback
assert result["status"] == "success"
#------------------------------------------------------------
# now ask for the geneset names
#------------------------------------------------------------
cmd = "getMarkersNetwork"
callback = "displayMarkersNetwork"
payload = ""
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["cmd"] == callback
assert result["status"] == "success"
assert len(result["payload"]) > 50000 # 67263 on (14 aug 2015)
#----------------------------------------------------------------------------------------------------
def test_getPathway():
"set current dataset, ask for pi3kAkt pathway"
print "--- test_getPathway"
#------------------------------------------------------------
# first specify currentDataSet
#------------------------------------------------------------
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "DEMOdz";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["payload"]["datasetName"] == dataset;
assert result["cmd"] == callback
assert result["status"] == "success"
#------------------------------------------------------------
# now ask for the geneset names
#------------------------------------------------------------
cmd = "getPathway"
callback = "displayMarkersNetwork"
payload = "g.pi3kAkt.json"
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["cmd"] == callback
assert result["status"] == "success"
assert len(result["payload"]) > 150000 # 1670299 on (14 aug 2015)
#----------------------------------------------------------------------------------------------------
def test_initUserDataStore():
"connect to (and possibly create) a user data store"
print "--- test_initUserDataStore"
cmd = "initUserDataStore"
callback = "userDataStoreInitialized"
payload = {"userID": "[email protected]", "userGroups": ["public", "test"]};
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
fields = result.keys()
fields.sort()
assert fields == ["callback", "cmd", "payload", "status"]
assert result["status"] == "success"
assert result["callback"] == ""
assert result["cmd"], "userDataStoreInitialized"
# the datastore may have been created previously, and may have some data items in it.
# the payload is the data item count
assert result["payload"] >= 0
#------------------------------------------------------------------------------------------------------------------------
def test_getUserDataStoreSummary():
print "--- test_getUserDataStoreSummary"
"connect to a user data store, get a table describing contents"
#--------------------------------------------------------------------------------
# first initialize, guaranteeing that the datastore exists (even if empty)
#--------------------------------------------------------------------------------
cmd = "initUserDataStore"
callback = "userDataStoreInitialized"
payload = {"userID": "[email protected]", "userGroups": ["public", "test"]};
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
itemCount = result["payload"]
assert itemCount >= 0
cmd = "getUserDataStoreSummary"
callback = "handleUserDataStoreSummary"
payload = {"userID": "[email protected]", "userGroups": ["public", "test"]};
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
payload = result["payload"]
fields = result["payload"].keys()
fields.sort()
assert fields == ["colnames", "tbl"]
colnames = payload["colnames"]
colnames.sort()
assert colnames == ['created', 'file', 'group', 'name', 'permissions', 'tags', 'user']
tbl = payload["tbl"]
assert len(tbl) == itemCount
#------------------------------------------------------------------------------------------------------------------------
def test_addDataToUserDataStore_1_item():
print "--- test_getUserDataStoreSummary_1_item, a list of the first 10 integers"
"connect to a user data store, get a table describing contents"
#--------------------------------------------------------------------------------
# first initialize, guaranteeing that the datastore exists (even if empty)
#--------------------------------------------------------------------------------
cmd = "initUserDataStore"
callback = "userDataStoreInitialized"
payload = {"userID": "[email protected]", "userGroups": ["public", "test"]};
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
itemCount = result["payload"]
assert itemCount >= 0
#--------------------------------------------------------------------------------
# now add a sequence of integers
#--------------------------------------------------------------------------------
cmd = "userDataStoreAddData"
callback = "userDataStoreDataAdded"
userID = "[email protected]"
payload = {"userID": userID,
"userGroups": ["public", "test"],
"dataItem": range(1,10),
"name": "oneToTen",
"group": "public",
"permissions": 444,
"tags": ["numbers", "test data"]};
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
newItemCount = result["payload"]
assert newItemCount > itemCount
displayUserDataStoreSummary(userID)
# test_addDataToUserDataStore_1_item
#------------------------------------------------------------------------------------------------------------------------
def displayUserDataStoreSummary(userID):
print "--- test_displayUserDataStoreSummary"
cmd = "getUserDataStoreSummary"
callback = "handleUserDataStoreSummary"
payload = {"userID": userID, "userGroups": ["public", "test"]}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
payload = result["payload"]
#------------------------------------------------------------------------------------------------------------------------
# get the summary table, pick out a dataItemID (currently in the "file" column of the table),
# retrieve it, make sure it is a reasonable value
#
def test_getDataItem():
print "--- test_getDataItem"
#----------------------------------------------------------------
# load the summary table. at least one item should be there
# as long as this test is run after test_addDataToUserDataStore_1_item
# with no intervening delete
#----------------------------------------------------------------
userID = "[email protected]"
cmd = "getUserDataStoreSummary"
callback = ""
payload = {"userID": userID, "userGroups": ["public", "test"]}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
payload = result["payload"]
# make sure that the item added in a prior test is indeed present
tbl = payload["tbl"]
assert len(tbl) > 0;
#----------------------------------------------------------------
# get the target
#----------------------------------------------------------------
ids = map(lambda x: x[0], tbl)
assert len(ids) > 0
target = ids[0]
#----------------------------------------------------------------
# get the target's data
#----------------------------------------------------------------
cmd = "userDataStoreGetDataItem"
callback = "do nothing";
payload = {"userID": userID, "userGroups": ["public", "test"], "dataItemName": target}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
payload = result["payload"]
# TODO: this test could (should) go further:
# setup a deterministic state in the datastore, get a specific
# dataItem, check for equality
# further, a wide variety of data types should be stored, and
# all retrieved reliably
# test_getDataItem
#------------------------------------------------------------------------------------------------------------------------
# get the summary table, pick out a dataItemID (currently in the "file" column of the table), delete it, check that it
# is gone
def test_deleteDataStoreItem():
print "--- test_deleteDataStoreItem"
#----------------------------------------------------------------
# first get a valid dataItemID, for a data item placed there
# by a preceeding "add" test.
#----------------------------------------------------------------
userID = "[email protected]"
cmd = "getUserDataStoreSummary"
callback = ""
payload = {"userID": userID, "userGroups": ["public", "test"]}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
payload = result["payload"]
# make sure that at least one item added in a prior test is present
tbl = payload["tbl"]
ids = map(lambda x: x[0], tbl)
assert len(ids) > 0
target = ids[0]
#----------------------------------------------------------------
# delete the target
#----------------------------------------------------------------
cmd = "userDataStoreDeleteDataItem";
callback = "userDataStoreDataItemDeleted"
payload = {"userID": userID, "userGroups": ["public", "test"], "dataItemName": target}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
payload = result["payload"]
assert payload == "'%s' deleted" % target
#----------------------------------------------------------------
# make sure the target id is gone
#----------------------------------------------------------------
userID = "[email protected]"
cmd = "getUserDataStoreSummary"
callback = ""
payload = {"userID": userID, "userGroups": ["public", "test"]}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
payload = result["payload"]
tbl = payload["tbl"]
ids = map(lambda x: x[0], tbl)
assert not target in ids
#----------------------------------------------------------------
# try to delete a bogus target
#----------------------------------------------------------------
target = "bogusTarget"
cmd = "userDataStoreDeleteDataItem";
callback = "userDataStoreDataItemDeleted"
payload = {"userID": userID, "userGroups": ["public", "test"], "dataItemName": target}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "error"
payload = result["payload"]
assert payload == "wsUserDataStore, no item named '%s' to delete" % target
# test_deleteDataStoreItem
#------------------------------------------------------------------------------------------------------------------------
def test_survivalCurves():
print "--- test_survivalCurves"
#----------------------------------------
# use TCGAgbm
#----------------------------------------
cmd = "specifyCurrentDataset"
callback = "datasetSpecified"
dataset = "TCGAgbm";
payload = dataset
# set a legitimate dataset
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload});
ws.send(msg)
result = loads(ws.recv())
assert result["payload"]["datasetName"] == dataset;
assert result["cmd"] == callback
assert result["status"] == "success"
#-------------------------------------------------------------------------------------------------------
# choose 5 long survivors: see test_survival: ids <- subset(tbl, Survival > 2800)$ptID[1:sampleCount]
#-------------------------------------------------------------------------------------------------------
longSurvivors = ["TCGA.06.6693", "TCGA.12.1088", "TCGA.02.0113", "TCGA.02.0114", "TCGA.08.0344"]
cmd = "calculateSurvivalCurves"
callback = "displaySurvivalCurves"
payload = {"sampleIDs": longSurvivors, "title": "testWebSocketOperations.py"}
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload});
ws.send(msg)
result = loads(ws.recv())
assert result["status"] == "success"
assert result["cmd"] == callback
payload = result["payload"]
assert len(payload) > 10000 # base64 encoded image, coming in as characters
#------------------------------------------------------------------------------------------------------------------------
def test_getDrugGeneInteractions():
print "--- test_getDrugGeneInteractions"
cmd = "getDrugGeneInteractions"
geneSymbols = ["HDAC10", "GABRE", "SLC5A4", "MDM2", "OAZ2", "PGR"]
payload = {"genes": geneSymbols};
callback = "handleDrugGeneInteractions"
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload});
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"]
fields = payload.keys()
assert fields == ['colnames', 'tbl']
colnames = payload["colnames"]
assert (colnames == ["gene", "drug", "drugID", "interaction", "source"])
tbl = payload["tbl"]
assert(len(tbl) > 10) # actually 186 on (24 jul 2015)
assert(len(tbl[0]) == len(colnames))
# make sure the geneSymbols returned are actually in the input list
assert(tbl[0][0] in geneSymbols)
# now try bogus geneSymbols which will never be matched
geneSymbols = ["moeBogus", "curlyBogus", "", "harpoBogus"]
payload = {"genes": geneSymbols};
callback = "handleDrugGeneInteractions"
msg = dumps({"cmd": cmd, "status": "request", "callback": callback, "payload": payload});
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"]
fields = payload.keys()
assert fields == ['colnames', 'tbl']
colnames = payload["colnames"]
assert (colnames == ["gene", "drug", "drugID", "interaction", "source"])
tbl = payload["tbl"]
assert(len(tbl) == 0)
#------------------------------------------------------------------------------------------------------------------------
def test_pca():
print "--- test_pca"
test_pcaCreateWithDataSet()
test_pcaCalculate()
#------------------------------------------------------------------------------------------------------------------------
def test_pcaCreateWithDataSet():
print ("--- test_pcaCreateWithDataSet");
# two mRNA expression matrices in DEMOdz:
# "mtx.mrna.ueArray" "mtx.mrna.bc"
payload = {"dataPackage": "DEMOdz", "matrixName": "mtx.mrna.ueArray"}
msg = dumps({"cmd": "createPCA", "status":"request",
"callback": "pcaCreatedHandler", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"];
assert(payload.find("PCA package, matrices:") >= 0)
#------------------------------------------------------------------------------------------------------------------------
def test_pcaCalculate():
"calculates pca on DEMOdz, the full mrna matrix, using pca object created above"
print "--- test_pcaCalculate"
msg = dumps({"cmd": "calculatePCA", "status":"request",
"callback":"handlePcaResult", "payload": ""})
ws.send(msg)
result = loads(ws.recv())
assert(result["cmd"] == "handlePcaResult")
assert(result["status"] == "success")
payload = result["payload"]
keys = payload.keys()
keys.sort()
assert(keys == ['ids', 'importance.PC1', 'importance.PC2', 'maxValue', 'scores'])
ids = payload["ids"]
assert(len(ids) >= 20)
assert(ids.index('TCGA.02.0003.01') >= 0)
assert(payload["maxValue"] > 10)
assert(payload["importance.PC1"] > 0.0)
assert(payload["importance.PC2"] > 0.0)
#------------------------------------------------------------------------------------------------------------------------
def test_plsr():
print "--- test_plsr"
test_plsrCreateWithDataSet()
test_plsrSummarizePLSRPatientAttributes()
test_plsrCalculateSmallOneFactor()
test_plsrCalculateSmallTwoFactors()
#------------------------------------------------------------------------------------------------------------------------
def test_plsrCreateWithDataSet():
"sends dataset as a named string, gets back show method's version of the dataset object"
print "--- testCreateWithDataSet"
# two mRNA expression matrices in DEMOdz:
# "mtx.mrna.ueArray" "mtx.mrna.bc"
payload = {"dataPackage": "DEMOdz", "matrixName": "mtx.mrna.ueArray"}
msg = dumps({"cmd": "createPLSR", "status":"request",
"callback":"PLSRcreatedHandler", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
payload = result["payload"];
assert(payload.find("PLSR package, matrices:") >= 0)
#------------------------------------------------------------------------------------------------------------------------
def test_plsrSummarizePLSRPatientAttributes():
"gets five-number summary of any numerical attribute in the patient history table"
print "--- testSummarizePLSRPatientAttributes"
payload = ["AgeDx"]
msg = dumps({"cmd": "summarizePLSRPatientAttributes", "status":"request",
"callback":"handlePlsrClincialAttributeSummary", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert(result["status"] == "to be examined element by element")
assert(result["cmd"] == "handlePlsrClincialAttributeSummary")
assert(result["payload"]["AgeDx"] == [9369, 15163.5, 19153, 25736, 31566])
    # send a second request, but one guaranteed to fail
payload = "bogus"
msg = dumps({"cmd": "summarizePLSRPatientAttributes", "status":"request",
"callback":"handlePlsrClincialAttributeSummary", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert(result["payload"]["bogus"] == None)
#----------------------------------------------------------------------------------------------------
def test_plsrCalculateSmallOneFactor():
"calculates plsr on DEMOdz, with two patient groups, low and high AgeDx (age at diagnosis)"
print "--- testCalculateSmallOneFactor"
# in R: sample(colnames(matrices(getDataPackage(myplsr))$mtx.mrna), size=10)
genesOfInterest = ["ELL","EIF4A2","ELAVL2","UPF1","EGFR","PRPSAP2","TTPA","PIGP","TTN","UNC45A"]
factor = {"name": "AgeDx", "low": 12000, "high": 2800}
payload = {"genes": genesOfInterest, "factorCount": 1, "factors": [factor]};
msg = dumps({"cmd": "calculatePLSR", "status":"request",
"callback":"handlePlsrResult", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert(result["cmd"] == "handlePlsrResult")
assert(result["status"] == "success")
payload = result["payload"]
fieldNames = payload.keys()
fieldNames.sort()
assert(fieldNames == ['loadingNames', 'loadings', 'maxValue', 'vectorNames', 'vectors'])
vectors = payload["vectors"]
assert(len(vectors) == 2)
vectorNames = payload["vectorNames"]
assert(vectorNames == ['AgeDx.lo', 'AgeDx.hi'])
loadings = payload["loadings"]
loadingNames = payload["loadingNames"]
assert(loadingNames == genesOfInterest)
assert(len(loadings) == 10)
maxValue = payload["maxValue"]
assert(maxValue == 0.8195)
#----------------------------------------------------------------------------------------------------
def test_plsrCalculateSmallTwoFactors():
"calculates plsr on DEMOdz, with two patient groups, low and high AgeDx (age at diagnosis)"
print "--- test_plsrCalculateSmallTwoFactors"
# in R: sample(colnames(matrices(getDataPackage(myplsr))$mtx.mrna), size=10)
genesOfInterest = ["ELL","EIF4A2","ELAVL2","UPF1","EGFR","PRPSAP2","TTPA","PIGP","TTN","UNC45A"]
factor1 = {"name": "AgeDx", "low": 12000, "high": 2800}
factor2 = {"name": "Survival", "low": 20, "high": 3000}
payload = {"genes": genesOfInterest, "factorCount": 2, "factors": [factor1, factor2]};
msg = dumps({"cmd": "calculatePLSR", "status":"request",
"callback":"handlePlsrResult", "payload": payload})
ws.send(msg)
result = loads(ws.recv())
assert(result["cmd"] == "handlePlsrResult")
assert(result["status"] == "success")
payload = result["payload"]
fieldNames = payload.keys()
fieldNames.sort()
assert(fieldNames == ['loadingNames', 'loadings', 'maxValue', 'vectorNames', 'vectors'])
vectors = payload["vectors"]
vectorNames = payload["vectorNames"]
assert(vectorNames == ['AgeDx.lo', 'AgeDx.hi', 'Survival.lo', 'Survival.hi'])
loadings = payload["loadings"]
loadingNames = payload["loadingNames"]
assert(loadingNames == genesOfInterest)
assert(len(vectors) == 4)
assert(len(loadings) == 10)
maxValue = payload["maxValue"]
assert(maxValue == 0.8822)
#------------------------------------------------------------------------------------------------------------------------
interactive = (sys.argv[0] != "testWebSocketOperations_server.py")
if(not(interactive)):
runTests()
| {
"content_hash": "6a34e3bc82ebfd85d9bcf1eec0b081e4",
"timestamp": "",
"source": "github",
"line_count": 1275,
"max_line_length": 129,
"avg_line_length": 36.199215686274506,
"alnum_prop": 0.5418598604671317,
"repo_name": "oncoscape/Oncoscape",
"id": "ebfef96c68a609c1e0b4294a8243ccb5dc8ef923",
"size": "46285",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Oncoscape/inst/unitTests/testWebSocketOperations_server.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "477"
},
{
"name": "HTML",
"bytes": "1470640"
},
{
"name": "JavaScript",
"bytes": "4566496"
},
{
"name": "Makefile",
"bytes": "13241"
},
{
"name": "Python",
"bytes": "159722"
},
{
"name": "R",
"bytes": "1699435"
},
{
"name": "Shell",
"bytes": "1608"
},
{
"name": "TeX",
"bytes": "8554"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Stream.running'
db.add_column(u'twitter_stream', 'running',
self.gf('django.db.models.fields.BooleanField')(default=False),
keep_default=False)
def backwards(self, orm):
# Deleting field 'Stream.running'
db.delete_column(u'twitter_stream', 'running')
models = {
u'twitter.followedlocation': {
'Meta': {'object_name': 'FollowedLocation'},
'bounding_box': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followed_locations'", 'to': u"orm['twitter.Stream']"})
},
u'twitter.followeduser': {
'Meta': {'object_name': 'FollowedUser'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'followed_users'", 'to': u"orm['twitter.Stream']"}),
'user_id': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '500'})
},
u'twitter.stream': {
'Meta': {'object_name': 'Stream'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'running': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('autoslug.fields.AutoSlugField', [], {'unique_with': '()', 'max_length': '50', 'populate_from': "'name'"})
},
u'twitter.trackedterm': {
'Meta': {'object_name': 'TrackedTerm'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'phrase': ('django.db.models.fields.CharField', [], {'max_length': '60'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'tracked_terms'", 'to': u"orm['twitter.Stream']"})
},
u'twitter.tweet': {
'Meta': {'object_name': 'Tweet'},
'coordinates': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'created_at': ('django.db.models.fields.DateTimeField', [], {}),
'entities': ('jsonfield.fields.JSONField', [], {}),
'favorite_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'in_reply_to_screen_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'in_reply_to_status_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'in_reply_to_user_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True'}),
'place': ('jsonfield.fields.JSONField', [], {'null': 'True'}),
'possibly_sensitive': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'raw_data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'retweet_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'stream': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['twitter.Stream']"}),
'text': ('django.db.models.fields.CharField', [], {'max_length': '400'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['twitter.TwitterUser']"})
},
u'twitter.twitteruser': {
'Meta': {'object_name': 'TwitterUser'},
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '500'}),
'favourites_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'followers_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'friends_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
'profile_image_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000'}),
'raw_data': ('jsonfield.fields.JSONField', [], {'default': '{}'}),
'screen_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'verified': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
}
}
complete_apps = ['twitter'] | {
"content_hash": "ee019928cdcc5885b56a84c57ef50ce8",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 146,
"avg_line_length": 63.28395061728395,
"alnum_prop": 0.5446742099102614,
"repo_name": "chalkchisel/django-social-stream",
"id": "44efd4696f7af4a64849bdc8d0c1e2996c8c4c3e",
"size": "5150",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "social_stream/twitter/migrations/0003_auto__add_field_stream_running.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "99116"
}
],
"symlink_target": ""
} |
import json
import requests
url = 'https://api.github.com/search/issues?q=is:issue+label:official-cve-feed+\
state:closed+repo:kubernetes/kubernetes&per_page=100'
headers = {'Accept': 'application/vnd.github.v3+json'}
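# Hedged addition (editor's sketch, not part of the original script): the note
# further down points out that unauthenticated search requests may hit GitHub's
# rate limits. Sending a personal access token raises those limits considerably;
# the GITHUB_TOKEN environment variable name used here is an assumption, not a
# project convention.
import os

gh_token = os.environ.get("GITHUB_TOKEN")
if gh_token:
    headers["Authorization"] = f"token {gh_token}"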
res = requests.get(url, headers=headers)
gh_items = res.json()['items']
# Use link header to iterate over pages
# https://docs.github.com/en/rest/overview/resources-in-the-rest-api#pagination
# https://datatracker.ietf.org/doc/html/rfc5988
# Please note that if there is a great number of pages, this unauthenticated
# request may be subject to rate limits and fail.
# https://docs.github.com/en/rest/overview/resources-in-the-rest-api#rate-limiting
while 'next' in res.links:
res = requests.get(res.links['next']['url'], headers=headers)
gh_items.extend(res.json()['items'])
cve_list = []
for item in gh_items:
cve = {"issue_url": None, "number": None, "cve_id": None,
"summary": None, "cve_url": None, "google_group_url": None}
cve['issue_url'] = item['html_url']
cve['number'] = item['number']
title = item['title'].replace(" -", ":")
title = title.split(": ")
if len(title) == 1:
cve_id = None
cve['cve_id'] = None
cve['cve_url'] = None
cve['summary'] = title[0]
cve['google_group_url'] = None
else:
cve_id = title[0]
cve['cve_id'] = title[0]
if len(title) == 3:
cve['summary'] = title[2]
else:
cve['summary'] = title[1]
cve['cve_url'] = f"https://www.cve.org/cverecord?id={cve_id}"
cve['google_group_url'] = \
f"https://groups.google.com/g/kubernetes-announce/search?q={cve_id}"
cve_list.append(cve)
cves = json.dumps(cve_list, sort_keys=True, indent=4)
print(cves)
# write the final cve list to official_cve_feed.json
with open("official-cve-feed.json", "w") as cvejson:
cvejson.write(cves)
| {
"content_hash": "e6f56ee4e166d8fd7f4cf3153343c75b",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 82,
"avg_line_length": 37.88,
"alnum_prop": 0.6325237592397043,
"repo_name": "kubernetes/sig-security",
"id": "f51733b61e4826f5463031c0a6a684886c0388cc",
"size": "2506",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sig-security-tooling/cve-feed/hack/fetch-official-cve-feed.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "193"
},
{
"name": "Python",
"bytes": "6176"
},
{
"name": "Shell",
"bytes": "2564"
}
],
"symlink_target": ""
} |
def extractWanderingtranslationsWordpressCom(item):
'''
Parser for 'wanderingtranslations.wordpress.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if "WATTT" in item['tags']:
return buildReleaseMessageWithType(item, "WATTT", vol, chp, frag=frag, postfix=postfix)
return False
| {
"content_hash": "1d960d35dd35b75571e008f562915fb9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 89,
"avg_line_length": 28.428571428571427,
"alnum_prop": 0.7361809045226131,
"repo_name": "fake-name/ReadableWebProxy",
"id": "6d7696bf0dc80fff4610cb4c847477f951ac83fe",
"size": "399",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WebMirror/management/rss_parser_funcs/feed_parse_extractWanderingtranslationsWordpressCom.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "105811"
},
{
"name": "Dockerfile",
"bytes": "1178"
},
{
"name": "HTML",
"bytes": "119737"
},
{
"name": "JavaScript",
"bytes": "3006524"
},
{
"name": "Jupyter Notebook",
"bytes": "148075"
},
{
"name": "Mako",
"bytes": "1454"
},
{
"name": "Python",
"bytes": "5264346"
},
{
"name": "Shell",
"bytes": "1059"
}
],
"symlink_target": ""
} |
"""
[env]
# Conda Environment
conda create --name automate_po_job python=3.9.7
conda info --envs
source activate automate_po_job
conda deactivate
# if needed to remove
conda env remove -n [NAME_OF_THE_CONDA_ENVIRONMENT]
[path]
cd /Users/brunoflaven/Documents/03_git/BlogArticlesExamples/automate_po_job_demo_support/002_pythonspot/
[file]
python pythonspot_dummy.py
[source]
https://pythonspot.com/orm-with-sqlalchemy/
[required]
# install
pip install streamlit
pip install watchdog
pip install sqlalchemy
pip install -r requirements.txt
CREATE TABLE student (
id INTEGER NOT NULL,
username VARCHAR,
firstname VARCHAR,
lastname VARCHAR,
university VARCHAR,
PRIMARY KEY (id)
)
"""
import datetime
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
# required check schema_pythonspot_tabledef.py
from schema_pythonspot_tabledef import *
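# For orientation — a hedged sketch of what schema_pythonspot_tabledef.Student
# presumably looks like, inferred from the CREATE TABLE statement quoted in the
# docstring above; it is not the actual module contents and is kept as a comment
# so it does not shadow the imported class:
#
#   from sqlalchemy import Column, Integer, String
#   from sqlalchemy.ext.declarative import declarative_base
#
#   Base = declarative_base()
#
#   class Student(Base):
#       __tablename__ = 'student'
#       id = Column(Integer, primary_key=True)
#       username = Column(String)
#       firstname = Column(String)
#       lastname = Column(String)
#       university = Column(String)
#
#       def __init__(self, username, firstname, lastname, university):
#           self.username = username
#           self.firstname = firstname
#           self.lastname = lastname
#           self.university = university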
# personal configuration
import config_values.values_conf as conf
### 2. VALUES ###
URI_SQLITE_DB_PYTHONSPOT = conf.URI_SQLITE_DB_PYTHONSPOT
# engine = create_engine(f'sqlite:///{URI_SQLITE_DB_PYTHONSPOT}', echo=True)
engine = create_engine(f'sqlite:///{URI_SQLITE_DB_PYTHONSPOT}', echo=False)
# create a Session
Session = sessionmaker(bind=engine)
session = Session()
# Create objects
user = Student("james", "James", "Boogie", "MIT")
session.add(user)
user = Student("lara", "Lara", "Miami", "UU")
session.add(user)
user = Student("eric", "Eric", "York", "Stanford")
session.add(user)
user = Student("bruno", "Bruno", "Bentolila", "IUOO")
session.add(user)
user = Student("prisca", "Prisca", "Jore", "Heidelberg University")
session.add(user)
# commit the record the database
session.commit()
# DEBUG
print(f'2. ROWS INSERTED IN :: {URI_SQLITE_DB_PYTHONSPOT}')
| {
"content_hash": "7d36e388e34683e746fc5d3fe3fd1dbd",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 104,
"avg_line_length": 19.633333333333333,
"alnum_prop": 0.7357102433503112,
"repo_name": "bflaven/BlogArticlesExamples",
"id": "8e61a111db1fa7c2d50f953236b0747feb048f90",
"size": "1810",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "automate_po_job_demo_support/002_pythonspot/pythonspot_dummy.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "AppleScript",
"bytes": "2756"
},
{
"name": "CSS",
"bytes": "3497"
},
{
"name": "CoffeeScript",
"bytes": "1785"
},
{
"name": "Dockerfile",
"bytes": "993"
},
{
"name": "HTML",
"bytes": "23687927"
},
{
"name": "JavaScript",
"bytes": "12838"
},
{
"name": "Jupyter Notebook",
"bytes": "2918640"
},
{
"name": "Makefile",
"bytes": "4058"
},
{
"name": "PHP",
"bytes": "223161"
},
{
"name": "Python",
"bytes": "1461699"
},
{
"name": "Shell",
"bytes": "12291"
}
],
"symlink_target": ""
} |
import re
import nltk
from . import tokenizers as tok
from . import summary as summ
# mod = moderate
def mod_max_unit_func(unit_num):
return 5 * unit_num ** (1/12)
class Document(object):
def __init__(self, filename=None, text=None,
max_unit_func=lambda x: 5*x**(1/12)):
self.filename = filename
self.text = text
# original text
self.words = None
self.sentences = None
self.paragraphs = None
self.num_words = None
self.num_paragraphs = None
self.num_sentences = None
self.genre = None
self.summary = None
self.max_unit_func = max_unit_func
self.recursive = False
def build(self):
self.load()
self.tokenize()
self.get_count()
self.get_summary()
def load(self):
if self.filename:
self.text = summ.file_to_doc(self.filename)
else:
pass
def tokenize(self):
regex = re.compile(r"\((Applause|APPLAUSE|Laughter|LAUGHTER)\.\) ?",
re.IGNORECASE)
cleaned_text = regex.sub("", self.text)
self.words = cleaned_text.split()
self.sentences = tok.tokenize_to_sentences(
cleaned_text.replace("\n", " "))
self.paragraphs = tok.tokenize_to_paragraphs(cleaned_text)
def get_count(self):
self.num_words = len(self.words)
self.num_sentences = len(self.sentences)
self.num_paragraphs = len(self.paragraphs)
    # both unit_type and max_units must be given to get a fixed summary
def get_summary(self, unit_type=None, max_units=None, stem=True):
if unit_type is not None and max_units is not None:
print("Hello!")
if unit_type == 0:
units = self.sentences
divider = " "
else:
units = self.paragraphs
# for proper printing
divider = "\n"
else:
if self.num_words > 500 and self.num_paragraphs > 5:
units = self.paragraphs
unit_type = 1
unit_count = self.num_paragraphs
divider = "\n"
else:
units = self.sentences
unit_type = 0
unit_count = self.num_sentences
divider = " "
max_units = round(self.max_unit_func(unit_count))
summary_units = summ.get_tfidf_summary_units(units, max_units, stem)
# for long paragraphs
if unit_type == 1:
for i, unit in enumerate(summary_units):
doc = Document(text=unit,)
doc.max_unit_func = lambda x: 3*x**(1/12)
doc.recursive = True
doc.build()
summary_units[i] = doc.summary
self.summary = divider.join(summary_units)
degree = 1
while len(self.summary.split()) > 500:
self.shorten_summary(degree)
degree += 1
def shorten_summary(self, degree):
doc = Document(text=self.summary,
max_unit_func=lambda x: (5-degree)*x**(1/12))
doc.build()
self.summary = doc.summary
def pprint(self):
print("********* {} *********\n".format(self.filename))
print("TEXT STATISTICS:")
print("Word #: {}; Sentence #: {}; Paragraph #: {};\n".format(
self.num_words, self.num_sentences, self.num_paragraphs))
print("SUMMARY:\n")
summary_paragraphs = tok.tokenize_to_paragraphs(self.summary)
for sent in summary_paragraphs:
print(sent)
print("\n")
print("SUMMARY STATISTICS:")
print("Word #: {}: Sentence #: {}; Paragraph #: {};\n".format(
len(self.summary.split()),
len(tok.tokenize_to_sentences(self.summary)),
len(summary_paragraphs),))
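# Usage sketch (editor's addition, not part of the original module) — the class
# is driven through build() and then inspected with pprint(); "speech.txt" is an
# illustrative filename only.  Shown as a comment because the relative imports
# above mean this module is meant to be imported from the package, not run
# directly:
#
#   from engine.documents_old import Document
#
#   doc = Document(filename="speech.txt")
#   doc.build()
#   doc.pprint()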
| {
"content_hash": "304505903601d16e0590f4d5ece9ecf8",
"timestamp": "",
"source": "github",
"line_count": 127,
"max_line_length": 89,
"avg_line_length": 31.094488188976378,
"alnum_prop": 0.5292479108635098,
"repo_name": "euirim/maple",
"id": "550d577e5244d5d6bb583b62ee1d267452031720",
"size": "3949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "engine/documents_old.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "60"
},
{
"name": "Python",
"bytes": "24822"
}
],
"symlink_target": ""
} |
from decorator import decorate
def get_recorder(spy, attribute, context):
decorator_map = {
'function': FunctionRecorder(spy),
'instance_method': InstanceMethodRecorder(spy),
'class_method': ClassMethodRecorder(spy),
'static_method': StaticMethodRecorder(spy)
}
return decorate(attribute, context, decorator_map)
class Recorder(object):
def __init__(self, spy):
self.spy = spy
def record(self, fn, *args, **kwargs):
raise NotImplementedError
def __call__(self, fn, *args, **kwargs):
return self.record(fn, *args, **kwargs)
class FunctionRecorder(Recorder):
def record(self, fn, *args, **kwargs):
return_value = fn(*args, **kwargs)
self.spy.add_call(
fn,
args,
kwargs,
return_value
)
return return_value
class InstanceMethodRecorder(Recorder):
def record(self, fn, *args, **kwargs):
return_value = fn(*args, **kwargs)
self.spy.add_call(
fn,
args[1:], # discard 'self'
kwargs,
return_value
)
return return_value
class ClassMethodRecorder(InstanceMethodRecorder):
pass
class StaticMethodRecorder(FunctionRecorder):
pass
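# Editor's illustration (not part of the chara package): a minimal stand-in spy
# exposing the add_call() hook used by the recorders above, showing that
# FunctionRecorder both records a call and passes its return value through.
# The stub class and the add() function are hypothetical names.
if __name__ == '__main__':
    class _StubSpy(object):
        def __init__(self):
            self.calls = []

        def add_call(self, fn, args, kwargs, return_value):
            self.calls.append((fn.__name__, args, kwargs, return_value))

    def add(a, b):
        return a + b

    spy = _StubSpy()
    recorder = FunctionRecorder(spy)
    assert recorder(add, 2, 3) == 5
    assert spy.calls == [('add', (2, 3), {}, 5)]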
| {
"content_hash": "6b3eedee624cf367267157e89a35e7fd",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 55,
"avg_line_length": 21.89830508474576,
"alnum_prop": 0.5928792569659442,
"repo_name": "bywires/chara",
"id": "3c4345cabdf03f552ca311d2a4b36af04b12c7a9",
"size": "1292",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "chara/recorders.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "22110"
},
{
"name": "Shell",
"bytes": "77"
}
],
"symlink_target": ""
} |
from bilanci.tree_dict_models import deep_sum
from bilanci.utils import couch, nearly_equal
from bilanci.utils.comuni import FLMapper
from django.test import TestCase
from django.core.management import BaseCommand
from django.conf import settings
from collections import OrderedDict
from optparse import make_option
import logging
__author__ = 'guglielmo'
class Command(BaseCommand, TestCase):
option_list = BaseCommand.option_list + (
make_option('--years',
dest='years',
default='',
help='Years to fetch. From 2002 to 2012. Use one of this formats: 2012 or 2003-2006 or 2002,2004,2006'),
make_option('--cities',
dest='cities',
default='',
help='Cities codes or slugs. Use comma to separate values: Roma,Napoli,Torino or "All"'),
make_option('--couchdb-server',
dest='couchdb_server',
default=settings.COUCHDB_DEFAULT_SERVER,
help='CouchDB server to connect to (defaults to staging).'),
)
help = 'Verify the bilanci_simple values and sums.'
logger = logging.getLogger('management')
comuni_dicts = {}
def handle(self, *args, **options):
verbosity = options['verbosity']
if verbosity == '0':
self.logger.setLevel(logging.ERROR)
elif verbosity == '1':
self.logger.setLevel(logging.WARNING)
elif verbosity == '2':
self.logger.setLevel(logging.INFO)
elif verbosity == '3':
self.logger.setLevel(logging.DEBUG)
cities_codes = options['cities']
if not cities_codes:
raise Exception("Missing city parameter")
mapper = FLMapper()
cities = mapper.get_cities(cities_codes)
if cities_codes.lower() != 'all':
self.logger.info("Processing cities: {0}".format(cities))
years = options['years']
if not years:
raise Exception("Missing years parameter")
if "-" in years:
(start_year, end_year) = years.split("-")
years = range(int(start_year), int(end_year)+1)
else:
years = [int(y.strip()) for y in years.split(",") if 2001 < int(y.strip()) < 2014]
if not years:
raise Exception("No suitable year found in {0}".format(years))
self.logger.info("Processing years: {0}".format(years))
couchdb_server_name = options['couchdb_server']
if couchdb_server_name not in settings.COUCHDB_SERVERS:
raise Exception("Unknown couchdb server name.")
###
# Couchdb connections
###
couchdb_server_alias = options['couchdb_server']
if couchdb_server_alias not in settings.COUCHDB_SERVERS:
raise Exception("Unknown couchdb server alias.")
# hook to simple DB
simple_db_name = 'bilanci_simple'
simple_db = couch.connect(
simple_db_name,
couchdb_server_settings=settings.COUCHDB_SERVERS[couchdb_server_alias]
)
self.logger.info("Hooked to simple DB: {0}".format(simple_db_name))
# hook to normalized DB (for comparisons)
norm_db_name = 'bilanci_voci'
norm_db = couch.connect(
norm_db_name,
couchdb_server_settings=settings.COUCHDB_SERVERS[couchdb_server_alias]
)
self.logger.info("Hooked to normalized DB: {0}".format(norm_db_name))
entrate_sections = OrderedDict([
('Accertamenti', 0),
('Riscossioni in conto competenza', 1),
('Riscossioni in conto residui', 2),
])
spese_sections = OrderedDict([
('Impegni', 0),
('Pagamenti in conto competenza', 1),
('Pagamenti in conto residui', 2),
])
# totali_* will hold a list of all voices to be compared
# norm refers to the normalized tree
# simp refers to the simple tree
totali_preventivo_entrate = [
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-tributarie',
'data', 'totale titolo i', 0),
'simp': ('preventivo', 'ENTRATE', 'Imposte e tasse', 'TOTALE')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-derivanti-da-contributi-e-trasferimenti-correnti-dello-stato-della-regione-e-di-altri-enti-pubblici-anche-in-rapporto-funzioni-delegate-dalla-regione',
'data', 'totale titolo ii', 0),
'simp': ('preventivo', 'ENTRATE', 'Contributi pubblici', 'TOTALE')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-extratributarie',
'data', 'totale titolo iii', 0),
'simp': ('preventivo', 'ENTRATE', 'Entrate extratributarie', 'TOTALE')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-derivanti-da-alienazione-da-trasferimenti-di-capitali-e-da-riscossioni-di-crediti',
'data', 'totale titolo iv', 0),
'simp': ('preventivo', 'ENTRATE', 'Vendite e trasferimenti di capitali', 'TOTALE')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-derivanti-da-accensioni-di-prestiti',
'data', 'totale titolo v', 0),
'simp': ('preventivo', 'ENTRATE', 'Prestiti')},
{'norm': ('preventivo', '02',
'quadro-2-entrate-entrate-derivanti-da-servizi-per-conto-di-terzi',
'data', 'totale titolo vi', 0),
'simp': ('preventivo', 'ENTRATE', 'Entrate per conto terzi')},
]
totali_consuntivo_entrate = []
for section_name, section_idx in entrate_sections.items():
totali_consuntivo_entrate.extend([
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-i-entrate-tributarie',
'data', 'totale entrate tributarie', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Imposte e tasse', 'TOTALE')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-ii-entrate-derivanti-da-contributi-e-trasferimenti-correnti',
'data', 'totale entrate derivanti da contributi e trasferimenti correnti', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Contributi pubblici', 'TOTALE')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-iii-entrate-extratributarie',
'data', 'totale entrate extratributarie', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Entrate extratributarie', 'TOTALE')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-iv-entrate-derivanti-da-alienazione-da-trasfer-di-capitali-e-da-riscossioni-di-crediti',
'data', 'totale entrate derivanti da alienazione, trasferimenti di capitali e da riscossioni di crediti', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Vendite e trasferimenti di capitali', 'TOTALE')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-v-entrate-derivanti-da-accensione-di-prestiti',
'data', 'totale entrate derivanti da accensione di prestiti', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Prestiti')},
{'norm': ('consuntivo', '02',
'quadro-2-entrate-titolo-vi-entrate-da-servizi-per-conto-di-terzi',
'data', 'totale entrate da servizi per conto di terzi', section_idx),
'simp': ('consuntivo', 'ENTRATE', section_name, 'Entrate per conto terzi')},
])
totali_consuntivo_spese = []
# quadro 3
        # section_name and section_idx contain the Impegni/Competenze/Residui name and index
for section_name, section_idx in spese_sections.items():
totali_consuntivo_spese.extend([
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'totale generale delle spese', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'TOTALE')},
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'titolo i - spese correnti', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'Spese correnti', 'TOTALE')},
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'titolo ii - spese in c/capitale', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'Spese per investimenti', 'TOTALE')},
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'titolo iii - spese per rimborso di prestiti', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'Prestiti')},
{'norm': ('consuntivo', '03',
'quadro-3-riepilogo-generale-delle-spese',
'data', 'titolo iv - spese per servirzi per conto di terzi', section_idx),
'simp': ('consuntivo', 'SPESE', section_name, 'Spese per conto terzi')},
])
# quadro 4
totali_consuntivo_spese.extend([
{'norm': ('consuntivo', '04',
'quadro-4-a-impegni',
'data', 'totale', -1),
'simp': ('consuntivo', 'SPESE', 'Impegni', 'Spese correnti', 'TOTALE')},
{'norm': ('consuntivo', '04',
'quadro-4-b-pagamenti-in-conto-competenza',
'data', 'totali', -1),
'simp': ('consuntivo', 'SPESE', 'Pagamenti in conto competenza', 'Spese correnti', 'TOTALE')},
{'norm': ('consuntivo', '04',
'quadro-4-c-pagamenti-in-conto-residui',
'data', 'totali', -1),
'simp': ('consuntivo', 'SPESE', 'Pagamenti in conto residui', 'Spese correnti', 'TOTALE')},
])
# quadro 5
totali_consuntivo_spese.extend([
{'norm': ('consuntivo', '05',
'quadro-5-a-impegni',
'data', 'totale', -1),
'simp': ('consuntivo', 'SPESE', 'Impegni', 'Spese per investimenti', 'TOTALE')},
{'norm': ('consuntivo', '05',
'quadro-5-b-pagamenti-in-conto-competenza',
'data', 'totale', -1),
'simp': ('consuntivo', 'SPESE', 'Pagamenti in conto competenza', 'Spese per investimenti', 'TOTALE')},
{'norm': ('consuntivo', '05',
'quadro-5-c-pagamenti-in-conto-residui',
'data', 'totale', -1),
'simp': ('consuntivo', 'SPESE', 'Pagamenti in conto residui', 'Spese per investimenti', 'TOTALE')},
])
somme_consuntivo_nodes = []
for section_name in entrate_sections.keys():
somme_consuntivo_nodes.extend([
('consuntivo', 'ENTRATE', section_name, 'Imposte e tasse'),
('consuntivo', 'ENTRATE', section_name, 'Imposte e tasse', 'Imposte'),
('consuntivo', 'ENTRATE', section_name, 'Imposte e tasse', 'Tasse'),
('consuntivo', 'ENTRATE', section_name, 'Contributi pubblici'),
('consuntivo', 'ENTRATE', section_name, 'Entrate extratributarie'),
('consuntivo', 'ENTRATE', section_name, 'Entrate extratributarie', 'Servizi pubblici'),
('consuntivo', 'ENTRATE', section_name, 'Entrate extratributarie', 'Proventi di beni dell\'ente'),
('consuntivo', 'ENTRATE', section_name, 'Vendite e trasferimenti di capitali'),
('consuntivo', 'ENTRATE', section_name, 'Vendite e trasferimenti di capitali', 'Trasferimenti di capitali da privati'),
])
somme_preventivo_nodes = [
('preventivo', 'ENTRATE', 'Imposte e tasse'),
('preventivo', 'ENTRATE', 'Imposte e tasse', 'Imposte'),
('preventivo', 'ENTRATE', 'Imposte e tasse', 'Tasse'),
('preventivo', 'ENTRATE', 'Contributi pubblici'),
('preventivo', 'ENTRATE', 'Entrate extratributarie'),
('preventivo', 'ENTRATE', 'Vendite e trasferimenti di capitali'),
]
for city in cities:
for year in years:
self.logger.info("Processing city of {0}, year {1}".format(
city, year
))
code = "{}_{}".format(year, city)
norm_doc_id = "{}_{}".format(year, city)
simple_doc_id = city
# both documents need to exist in the dbs
self.assertTrue(self.test_couch_doc_exists(norm_db, norm_doc_id),
"Could not find {}".format(norm_doc_id))
self.assertTrue(self.test_couch_doc_exists(simple_db, simple_doc_id))
norm_doc = norm_db[norm_doc_id]
simple_doc = simple_db[simple_doc_id]
# preventivo tests
if len(simple_doc[str(year)]['preventivo'].keys()) > 0:
self.logger.debug("::::: Testing first level totals for preventivo entrate")
self.test_totali(totali_preventivo_entrate, simple_doc, norm_doc, year)
self.logger.debug("::::: Testing totale - funzioni - interventi for preventivo/spese")
for tipo_spese in (u'Spese correnti', u'Spese per investimenti'):
node = simple_doc[str(year)]['preventivo']['SPESE'][tipo_spese]
label = u"/Preventivo/{0}".format(tipo_spese)
self.test_totale_funzioni_interventi(label, node, year)
self.logger.debug("::::: Testing inner sums for preventivo entrate")
self.test_somme(somme_preventivo_nodes, simple_doc, year)
# consuntivo tests
if len(simple_doc[str(year)]['consuntivo'].keys()) > 0:
self.logger.debug("::::: Testing first level totals for consuntivo entrate")
self.test_totali(totali_consuntivo_entrate, simple_doc, norm_doc, year)
self.logger.debug("::::: Testing first level totals for consuntivo spese")
self.test_totali(totali_consuntivo_spese, simple_doc, norm_doc, year)
self.logger.debug("::::: Testing totale - funzioni - interventi for consuntivo/spese")
for section_name in spese_sections.keys():
for tipo_spese in ('Spese correnti', 'Spese per investimenti'):
node = simple_doc[str(year)]['consuntivo']['SPESE'][section_name][tipo_spese]
label = u"/Consuntivo/{0}/{1}".format(section_name, tipo_spese)
self.test_totale_funzioni_interventi(label, node, year)
self.logger.debug("::::: Testing inner sums for consuntivo entrate")
self.test_somme(somme_consuntivo_nodes, simple_doc, year)
###
# TESTS
###
def test_couch_doc_exists(self, couch_db, doc_id):
"""
couch db connection is correct and document exists
"""
return doc_id in couch_db
###
# totals for first level sections in normalized and
# simplified trees are compared
###
def test_totali(self, totali, simple_doc, norm_doc, year):
"""
totals for 1st level sections of the preventivo/entrate in the normalized tree (quadro 2)
are compared with the corresponding values in the simplified tree
"""
for tot in totali:
# extract year section from the simple doc (simple docs contain all years)
tot_simp = simple_doc[str(year)]
tot_norm = norm_doc
# drill through the tree to fetch the leaf value in tot['simp']
for t in tot['simp']:
tot_simp = tot_simp[t]
            # drill through the tree to fetch the leaf value in tot['norm']
            # catch exceptions on totale/totali, trying both before failing
            # in the normalized tree
for t in tot['norm']:
if t == 'totale':
try:
tot_norm = tot_norm['totale']
except KeyError:
try:
tot_norm = tot_norm['totali']
except KeyError:
# log a warning and break away from the inner for loop
# do not execute the else section
self.logger.warning(
"totale/i key not found in bilanci_voce. node: {0}".format(
tot['norm']
)
)
break
else:
tot_norm = tot_norm[t]
else:
# transform the string representation in the normalized doc,
# into an integer (used in the simplified doc)
# so that the comparison is possible
if tot_norm != '':
tot_norm = int(round(float(tot_norm.replace('.', '').replace(',','.'))))
else:
tot_norm = 0
if tot_simp != tot_norm:
self.logger.warning("Totals are different.\n\tnorm val:{0}, node: {1}\n\tsimp val:{2}, node: {3}".format(
tot_norm, tot['norm'],
tot_simp, tot['simp'],
))
###
# sum of funzioni, interventi and the explicit totals in
# the simplified tree are compared
###
def test_totale_funzioni_interventi(self, simple_tree_label, simple_tree_node, year):
totale = simple_tree_node['TOTALE']
somma_funzioni = deep_sum(simple_tree_node['funzioni'])
somma_interventi = deep_sum(simple_tree_node['interventi'])
if self.nearly_equal(totale, somma_interventi) and \
self.nearly_equal(totale, somma_funzioni):
self.logger.debug(u"node: {0}. OK. totale: {1}".format(
simple_tree_label, totale
))
else:
self.logger.warning(u"\nnode: {0}. NOT OK.\n totale:\t\t {1}\n somma_funzioni:\t {2}\n somma_interventi:\t {3}".format(
simple_tree_label, totale, somma_funzioni, somma_interventi
))
# dump non-matching details to logger
if not self.nearly_equal(totale, somma_funzioni):
_ = deep_sum(simple_tree_node['funzioni'], logger=self.logger)
if not self.nearly_equal(totale, somma_interventi):
_ = deep_sum(simple_tree_node['interventi'], logger=self.logger)
###
# first level explicit totals and sums of underlying sections
# in the simplified tree are compared (consistency test)
###
def test_somme(self, nodes, simple_doc, year):
"""
Tests the sum of sections of the tree, against the totals.
This verifies the consistency of the simplified tree,
and, indirectly, the completeness and correctness of the
data fetched from the normalized tree.
"""
test_results = OrderedDict()
for node in nodes:
node_path = u"/{0}/../{1}".format(node[0], node[-1])
simp = simple_doc[str(year)]
for t in node:
simp = simp[t]
somma = deep_sum(simp)
totale = simp['TOTALE']
test_results[node[-1]] = (totale == somma)
if self.nearly_equal(totale, somma):
self.logger.debug(u"node: {0}. OK. totale: {1}".format(
node_path, totale
))
else:
self.logger.warning(u"node: {0}. NOT OK. totale: {1}. somma: {2}".format(
node_path, totale, somma
))
# dump non-matching details to logger
if not self.nearly_equal(totale, somma):
_ = deep_sum(simp, logger=self.logger)
def nearly_equal(self, a, b):
"""
Return true if the numbers are equals or close matches
"""
return nearly_equal(a, b, threshold=settings.NEARLY_EQUAL_THRESHOLD) | {
"content_hash": "564c55a3bf2e0fe47cd53c0bd79db39f",
"timestamp": "",
"source": "github",
"line_count": 447,
"max_line_length": 199,
"avg_line_length": 46.982102908277405,
"alnum_prop": 0.5337364887386314,
"repo_name": "DeppSRL/open_bilanci",
"id": "f211fdadcfdfb098605f027d557a031674bd5f8c",
"size": "21001",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bilanci_project/bilanci/management/commands/verify_simple.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "614"
},
{
"name": "CSS",
"bytes": "260422"
},
{
"name": "HTML",
"bytes": "461273"
},
{
"name": "JavaScript",
"bytes": "68685"
},
{
"name": "Makefile",
"bytes": "515"
},
{
"name": "Nginx",
"bytes": "675"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "638816"
},
{
"name": "Shell",
"bytes": "678328"
}
],
"symlink_target": ""
} |
import socket
from test_framework.socks5 import Socks5Configuration, Socks5Command, Socks5Server, AddressType
from test_framework.test_framework import NavCoinTestFramework
from test_framework.util import *
from test_framework.netutil import test_ipv6_local
'''
Test plan:
- Start navcoind's with different proxy configurations
- Use addnode to initiate connections
- Verify that proxies are connected to, and the right connection command is given
- Proxy configurations to test on navcoind side:
- `-proxy` (proxy everything)
- `-onion` (proxy just onions)
- `-proxyrandomize` Circuit randomization
- Proxy configurations to test on proxy side,
- support no authentication (other proxy)
- support no authentication + user/pass authentication (Tor)
- proxy on IPv6
- Create various proxies (as threads)
- Create navcoinds that connect to them
- Manipulate the navcoinds using addnode (onetry) an observe effects
addnode connect to IPv4
addnode connect to IPv6
addnode connect to onion
addnode connect to generic DNS name
'''
class ProxyTest(NavCoinTestFramework):
def __init__(self):
super().__init__()
self.num_nodes = 4
self.setup_clean_chain = False
self.have_ipv6 = test_ipv6_local()
# Create two proxies on different ports
# ... one unauthenticated
self.conf1 = Socks5Configuration()
self.conf1.addr = ('127.0.0.1', 13000 + (os.getpid() % 1000))
self.conf1.unauth = True
self.conf1.auth = False
# ... one supporting authenticated and unauthenticated (Tor)
self.conf2 = Socks5Configuration()
self.conf2.addr = ('127.0.0.1', 14000 + (os.getpid() % 1000))
self.conf2.unauth = True
self.conf2.auth = True
if self.have_ipv6:
# ... one on IPv6 with similar configuration
self.conf3 = Socks5Configuration()
self.conf3.af = socket.AF_INET6
self.conf3.addr = ('::1', 15000 + (os.getpid() % 1000))
self.conf3.unauth = True
self.conf3.auth = True
else:
print("Warning: testing without local IPv6 support")
self.serv1 = Socks5Server(self.conf1)
self.serv1.start()
self.serv2 = Socks5Server(self.conf2)
self.serv2.start()
if self.have_ipv6:
self.serv3 = Socks5Server(self.conf3)
self.serv3.start()
def setup_nodes(self):
# Note: proxies are not used to connect to local nodes
        # this is because the proxy to use is based on CService.GetNetwork(), which returns NET_UNROUTABLE for localhost
args = [
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-proxyrandomize=1'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf1.addr),'-onion=%s:%i' % (self.conf2.addr),'-proxyrandomize=0'],
['-listen', '-debug=net', '-debug=proxy', '-proxy=%s:%i' % (self.conf2.addr),'-proxyrandomize=1'],
[]
]
if self.have_ipv6:
args[3] = ['-listen', '-debug=net', '-debug=proxy', '-proxy=[%s]:%i' % (self.conf3.addr),'-proxyrandomize=0', '-noonion']
return start_nodes(self.num_nodes, self.options.tmpdir, extra_args=args)
def node_test(self, node, proxies, auth, test_onion=True):
rv = []
# Test: outgoing IPv4 connection through node
node.addnode("15.61.23.23:1234", "onetry")
cmd = proxies[0].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: navcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"15.61.23.23")
assert_equal(cmd.port, 1234)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if self.have_ipv6:
# Test: outgoing IPv6 connection through node
node.addnode("[1233:3432:2434:2343:3234:2345:6546:4534]:5443", "onetry")
cmd = proxies[1].queue.get()
assert(isinstance(cmd, Socks5Command))
# Note: navcoind's SOCKS5 implementation only sends atyp DOMAINNAME, even if connecting directly to IPv4/IPv6
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"1233:3432:2434:2343:3234:2345:6546:4534")
assert_equal(cmd.port, 5443)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
if test_onion:
# Test: outgoing onion connection through node
node.addnode("navcoinostk4e4re.onion:5556", "onetry")
cmd = proxies[2].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"navcoinostk4e4re.onion")
assert_equal(cmd.port, 5556)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
# Test: outgoing DNS name connection through node
node.addnode("node.noumenon:5556", "onetry")
cmd = proxies[3].queue.get()
assert(isinstance(cmd, Socks5Command))
assert_equal(cmd.atyp, AddressType.DOMAINNAME)
assert_equal(cmd.addr, b"node.noumenon")
assert_equal(cmd.port, 5556)
if not auth:
assert_equal(cmd.username, None)
assert_equal(cmd.password, None)
rv.append(cmd)
return rv
def run_test(self):
# basic -proxy
self.node_test(self.nodes[0], [self.serv1, self.serv1, self.serv1, self.serv1], False)
# -proxy plus -onion
self.node_test(self.nodes[1], [self.serv1, self.serv1, self.serv2, self.serv1], False)
# -proxy plus -onion, -proxyrandomize
rv = self.node_test(self.nodes[2], [self.serv2, self.serv2, self.serv2, self.serv2], True)
# Check that credentials as used for -proxyrandomize connections are unique
credentials = set((x.username,x.password) for x in rv)
assert_equal(len(credentials), len(rv))
if self.have_ipv6:
# proxy on IPv6 localhost
self.node_test(self.nodes[3], [self.serv3, self.serv3, self.serv3, self.serv3], False, False)
def networks_dict(d):
r = {}
for x in d['networks']:
r[x['name']] = x
return r
# test RPC getnetworkinfo
n0 = networks_dict(self.nodes[0].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n0[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n0[net]['proxy_randomize_credentials'], True)
assert_equal(n0['onion']['reachable'], True)
n1 = networks_dict(self.nodes[1].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n1[net]['proxy'], '%s:%i' % (self.conf1.addr))
assert_equal(n1[net]['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n1['onion']['proxy_randomize_credentials'], False)
assert_equal(n1['onion']['reachable'], True)
n2 = networks_dict(self.nodes[2].getnetworkinfo())
for net in ['ipv4','ipv6','onion']:
assert_equal(n2[net]['proxy'], '%s:%i' % (self.conf2.addr))
assert_equal(n2[net]['proxy_randomize_credentials'], True)
assert_equal(n2['onion']['reachable'], True)
if self.have_ipv6:
n3 = networks_dict(self.nodes[3].getnetworkinfo())
for net in ['ipv4','ipv6']:
assert_equal(n3[net]['proxy'], '[%s]:%i' % (self.conf3.addr))
assert_equal(n3[net]['proxy_randomize_credentials'], False)
assert_equal(n3['onion']['reachable'], False)
if __name__ == '__main__':
ProxyTest().main()
| {
"content_hash": "89eea0ba37c799e0d600cba1ffc83baa",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 146,
"avg_line_length": 42.584210526315786,
"alnum_prop": 0.6051167964404894,
"repo_name": "navcoindev/navcoin-core",
"id": "3e0a6f50f4f8958f925250c65e6bd3e4f774041e",
"size": "8306",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qa/rpc-tests/proxy_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "3655915"
},
{
"name": "C++",
"bytes": "4954999"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "M4",
"bytes": "176582"
},
{
"name": "Makefile",
"bytes": "105930"
},
{
"name": "Objective-C",
"bytes": "3771"
},
{
"name": "Objective-C++",
"bytes": "7240"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "946426"
},
{
"name": "QMake",
"bytes": "2020"
},
{
"name": "Roff",
"bytes": "3792"
},
{
"name": "Shell",
"bytes": "426873"
}
],
"symlink_target": ""
} |
from idaapi import *
from idautils import *
from idc import *
from SimpleXMLRPCServer import SimpleXMLRPCServer
import cPickle
def is_connected() :
return True
def wrapper_get_raw(oops) :
F = {}
for function_ea in Functions() :
F[ function_ea ] = []
f_start = function_ea
f_end = GetFunctionAttr(function_ea, FUNCATTR_END)
edges = set()
boundaries = set((f_start,))
F[ function_ea ].append( GetFunctionName(function_ea) )
for head in Heads(f_start, f_end) :
if isCode( GetFlags( head ) ) :
F[ function_ea ].append( (head, GetMnem(head), GetOpnd(head, 0), GetOpnd(head, 1), GetOpnd(head, 2)) )
refs = CodeRefsFrom(head, 0)
refs = set(filter(lambda x: x>=f_start and x<=f_end, refs))
if refs :
next_head = NextHead(head, f_end)
if isFlow(GetFlags(next_head)):
refs.add(next_head)
# Update the boundaries found so far.
boundaries.update(refs)
                    # For each of the references found, an edge is
                    # created.
for r in refs:
# If the flow could also come from the address
# previous to the destination of the branching
# an edge is created.
if isFlow(GetFlags(r)):
edges.add((PrevHead(r, f_start), r))
edges.add((head, r))
#print edges, boundaries
# Let's build the list of (startEA, startEA) couples
# for each basic block
sorted_boundaries = sorted(boundaries, reverse = True)
end_addr = PrevHead(f_end, f_start)
bb_addr = []
for begin_addr in sorted_boundaries:
bb_addr.append((begin_addr, end_addr))
# search the next end_addr which could be
# farther than just the previous head
# if data are interlaced in the code
# WARNING: it assumes it won't epicly fail ;)
end_addr = PrevHead(begin_addr, f_start)
while not isCode(GetFlags(end_addr)):
end_addr = PrevHead(end_addr, f_start)
# And finally return the result
bb_addr.reverse()
F[ function_ea ].append( (bb_addr, sorted(edges)) )
return cPickle.dumps( F )
def wrapper_Heads(oops) :
start, end = cPickle.loads(oops)
return cPickle.dumps( [ x for x in Heads( start, end ) ] )
def wrapper_Functions(oops) :
return cPickle.dumps( [ x for x in Functions() ] )
def wrapper_get_function(oops) :
name = cPickle.loads(oops)
for function_ea in Functions() :
if GetFunctionName(function_ea) == name :
return cPickle.dumps( function_ea )
return cPickle.dumps( -1 )
def wrapper_quit(oops) :
qexit(0)
class IDAWrapper :
def _dispatch(self, x, params) :
#fd = open("toto.txt", "w")
#fd.write( x + "\n" )
#fd.write( str(type(params[0])) + "\n" )
#fd.close()
params = cPickle.loads( *params )
if isinstance(params, tuple) == False :
params = (params,)
import types
import idautils
import idc
#[getattr(idautils, a, None) for a in dir(idautils) if isinstance(getattr(idautils, a, None) , types.FunctionType)]
for a in dir(idautils) :
#fd.write( "\t" + a + "\n" )
if a == x :
z = getattr(idautils, a, None)
ret = z( *params )
if type(ret).__name__=='generator' :
return cPickle.dumps( [ i for i in ret ] )
return cPickle.dumps( ret )
for a in dir(idc) :
#fd.write( "\t" + a + "\n" )
if a == x :
z = getattr(idc, a, None)
ret = z( *params )
if type(ret).__name__=='generator' :
return cPickle.dumps( [ i for i in ret ] )
return cPickle.dumps( ret )
return cPickle.dumps( [] )
def main() :
autoWait()
ea = ScreenEA()
server = SimpleXMLRPCServer(("localhost", 9000))
server.register_function(is_connected, "is_connected")
server.register_function(wrapper_get_raw, "get_raw")
server.register_function(wrapper_get_function, "get_function")
server.register_function(wrapper_Heads, "Heads")
server.register_function(wrapper_Functions, "Functions")
server.register_instance(IDAWrapper())
server.register_function(wrapper_quit, "quit")
server.serve_forever()
qexit(0)
main()
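# Editor's sketch (not part of the original module): once IDA Pro has loaded this
# wrapper, a client in another process could reach it over XML-RPC roughly like
# this — connect, then pull the pickled function/CFG map from get_raw().  The
# argument passed to the wrapper functions is pickled but otherwise ignored by
# them, so None is used here; names are illustrative only.
#
#   import xmlrpclib, cPickle
#
#   ida = xmlrpclib.ServerProxy("http://localhost:9000")
#   if ida.is_connected():
#       functions = cPickle.loads(ida.get_raw(cPickle.dumps(None)))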
| {
"content_hash": "9a6813bdfb2688ac9ebffd44b609eac7",
"timestamp": "",
"source": "github",
"line_count": 143,
"max_line_length": 123,
"avg_line_length": 34.18881118881119,
"alnum_prop": 0.5199427285743505,
"repo_name": "d9w/6858-android-intents",
"id": "0bf8c805137b7e9b1695c527a8686208d58cafa1",
"size": "5659",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "analyzer/androguard/core/binaries/idawrapper.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1911"
},
{
"name": "C++",
"bytes": "73382"
},
{
"name": "Java",
"bytes": "4143"
},
{
"name": "PHP",
"bytes": "1263"
},
{
"name": "Python",
"bytes": "1237389"
}
],
"symlink_target": ""
} |
import os
import numpy as np
import pandas as pd
import subprocess
import optparse
from sklearn.svm import SVC
from sklearn import cross_validation
from sklearn import grid_search
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from sklearn.cross_validation import train_test_split
from stacking_create_training_set import stacking_create_training_set
import xml.etree.ElementTree as ET
###################################################
# Testing the model on pure test set of 0.5 size ##
###################################################
########## OUTPUT: p,r,f1 on test set #############
###################################################
#defining the options of the script
#INPUTS: -i duke_config.xml, -N number_of_configurations, -a amplitude_of_perturbation, -g gold_standard_name
parser = optparse.OptionParser()
parser.add_option('-i','--input', dest = 'file_name', help = 'file_name')
parser.add_option('-N','--number', dest = 'N', help = 'number of classifiers',type = int)
parser.add_option('-a','--amplitude', dest = 'a', help = 'amplitude of perturbation',type = float)
parser.add_option('-g','--gold', dest = 'gold_standard_name', help = 'gold_standard_name')
(options, args) = parser.parse_args()
if options.file_name is None:
options.file_name = raw_input('Enter file name:')
if options.N is None:
options.N = raw_input('Enter number of classifiers:')
if options.a is None:
options.a = 0.05 #default to 0.05
if options.gold_standard_name is None:
options.gold_standard_name = raw_input('Enter gold standard file name:')
file_name = options.file_name #define the variables
gold_standard_name = options.gold_standard_name
N = int(options.N)
a = float(options.a)
#open files for writing
output_file_raw = open('ensemble_duke_output_raw_T2_n%d.txt' %N,'w')
#output_file = open('ensemble_duke_stacking_output_T2_n%d.txt' %N,'w')
gold_standard_read = open(gold_standard_name,'rU')
#iterate for each tweaked configuration
#read actual threshold
tree = ET.parse(file_name)
root = tree.getroot()
for thresh in root.iter('threshold'):
central_thresh = float(thresh.text) #central value of the threshold
thresholds = np.linspace(central_thresh - a/2, central_thresh + a/2, N)
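#e.g. with central_thresh = 0.80, a = 0.04 and N = 5 (illustrative values) this gives [0.78, 0.79, 0.80, 0.81, 0.82]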
for threshold in thresholds:
for thresh in root.iter('threshold'):
thresh.text = str(threshold)
thresh.set('updated','yes')
tree.write('../../../config/FEIII2016/copy_T2.xml')
java_command = ["java","-Xmx5000m", "-cp", "../../../lib/Duke/duke-core/target/*:../../../lib/Duke/duke-dist/target/*:../../../lib/Duke/duke-es/target/*:../../../lib/Duke/duke-json/target/*:../../../lib/Duke/duke-lucene/target/*:../../../lib/Duke/duke-mapdb/target/*:../../../lib/Duke/duke-mongodb/target/*:../../../lib/Duke/duke-server/target/*:../../../lib/Duke/lucene_jar/*", "no.priv.garshol.duke.Duke", "--showmatches","--batchsize=100000", "--threads=4", "../../../config/FEIII2016/copy_T2.xml"]
output_file_raw.write(subprocess.check_output(java_command)) #call duke on the copy.xml file and write the raw output on file
output_file_raw.write('\n')
output_file_raw.write('End of run\n')
output_file_raw.close()
#duke_output_parser('ensemble_duke_output_raw_T2_n%d.txt' %N, 'ensemble_duke_output_union_T2_n%d.txt' %N,'FFIEC','SEC')
#create the training set, named training_set_T1_n%d.csv
stacking_create_training_set('ensemble_duke_output_raw_T2_n%d.txt' %N,'training_set_T2_n%d.csv' %N, gold_standard_name, N)
#read it and make machine learning on it
data = pd.read_csv('training_set_T2_n%d.csv' %N)
#turn data into arrays
X = data.values[:,2:(N+2)] #x variables
y = np.array(data['y']) #class variables
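#X is expected to hold one column per perturbed Duke run (built by stacking_create_training_set); y holds the gold-standard labels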
#p_scores = []
#r_scores = []
#f1_scores = []
#T = 5
#repeat the split many times and average the results in order to cancel random fluctuations
#for i in range(T):
#stratified split in train and test set
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.50, stratify = y, random_state = 20)
# fit an SVM with rbf kernel
clf = SVC( kernel = 'rbf',cache_size = 1000)
#hyper-parameter optimization through grid-search cross validation
parameters = {'gamma' : np.logspace(-9,3,30),'C': np.logspace(-2,10,30)}
gs_rbf = grid_search.GridSearchCV(clf,param_grid=parameters,cv = 4)
gs_rbf.fit(X_train,y_train)
#select the best hyper-parameters
clf = gs_rbf.best_estimator_
#save the output
y_predict = np.reshape(clf.predict(X_test),(len(X_test),1))
#p_scores.append(precision_score(y_test,y_predict,average = 'binary'))
#r_scores.append(recall_score(y_test,y_predict,average = 'binary'))
#f1_scores.append(f1_score(y_test,y_predict,average = 'binary'))
#p = np.mean(p_scores)
#r = np.mean(r_scores)
#f1 = np.mean(f1_scores)
p = precision_score(y_test,y_predict,average = 'binary')
r = recall_score(y_test,y_predict,average = 'binary')
f1 = f1_score(y_test,y_predict,average = 'binary')
print "%.3f,%.3f,%.3f" %(p,r,f1)
| {
"content_hash": "97bb6c4cc8afdf6502c41ec6d40885f0",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 505,
"avg_line_length": 33.384105960264904,
"alnum_prop": 0.6712953779012101,
"repo_name": "enricopal/STEM",
"id": "3ef5bfec754f2886c2ce61bd7503ce7d971221f1",
"size": "5041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "validation/FEIII2016/precision_recall_threshold_curve/ensemble_duke_T2_stacking_prfoutput.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "847"
},
{
"name": "Java",
"bytes": "636761"
},
{
"name": "Python",
"bytes": "149144"
},
{
"name": "R",
"bytes": "11363"
},
{
"name": "Shell",
"bytes": "7880"
}
],
"symlink_target": ""
} |
"""Eager mode TF policy built using build_tf_policy().
It supports both traced and non-traced eager execution modes."""
import functools
import logging
import threading
from typing import Dict, List, Optional, Tuple
from ray.util.debug import log_once
from ray.rllib.models.catalog import ModelCatalog
from ray.rllib.models.repeated_values import RepeatedValues
from ray.rllib.policy.policy import Policy, LEARNER_STATS_KEY
from ray.rllib.policy.rnn_sequencing import pad_batch_to_sequences_of_same_size
from ray.rllib.policy.sample_batch import SampleBatch
from ray.rllib.utils import add_mixins, force_list
from ray.rllib.utils.annotations import override
from ray.rllib.utils.deprecation import deprecation_warning, DEPRECATED_VALUE
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.threading import with_lock
from ray.rllib.utils.typing import TensorType
tf1, tf, tfv = try_import_tf()
logger = logging.getLogger(__name__)
def _convert_to_tf(x, dtype=None):
if isinstance(x, SampleBatch):
dict_ = {k: v for k, v in x.items() if k != SampleBatch.INFOS}
return tf.nest.map_structure(_convert_to_tf, dict_)
elif isinstance(x, Policy):
return x
# Special handling of "Repeated" values.
elif isinstance(x, RepeatedValues):
return RepeatedValues(
tf.nest.map_structure(_convert_to_tf, x.values), x.lengths,
x.max_len)
if x is not None:
d = dtype
x = tf.nest.map_structure(
lambda f: _convert_to_tf(f, d) if isinstance(f, RepeatedValues)
else tf.convert_to_tensor(f, d) if f is not None else None, x)
return x
def _convert_to_numpy(x):
def _map(x):
if isinstance(x, tf.Tensor):
return x.numpy()
return x
try:
return tf.nest.map_structure(_map, x)
except AttributeError:
raise TypeError(
("Object of type {} has no method to convert to numpy.").format(
type(x)))
def convert_eager_inputs(func):
@functools.wraps(func)
def _func(*args, **kwargs):
if tf.executing_eagerly():
args = [_convert_to_tf(x) for x in args]
# TODO: (sven) find a way to remove key-specific hacks.
kwargs = {
k: _convert_to_tf(
v, dtype=tf.int64 if k == "timestep" else None)
for k, v in kwargs.items()
if k not in {"info_batch", "episodes"}
}
return func(*args, **kwargs)
return _func
def convert_eager_outputs(func):
@functools.wraps(func)
def _func(*args, **kwargs):
out = func(*args, **kwargs)
if tf.executing_eagerly():
out = tf.nest.map_structure(_convert_to_numpy, out)
return out
return _func
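# Note: stacking convert_eager_inputs and convert_eager_outputs on a method
# lets callers pass and receive plain Python/numpy structures while the body
# itself runs on eager TF tensors.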
def _disallow_var_creation(next_creator, **kw):
v = next_creator(**kw)
raise ValueError("Detected a variable being created during an eager "
"forward pass. Variables should only be created during "
"model initialization: {}".format(v.name))
def traced_eager_policy(eager_policy_cls):
"""Wrapper that enables tracing for all eager policy methods.
This is enabled by the --trace / "eager_tracing" config."""
class TracedEagerPolicy(eager_policy_cls):
def __init__(self, *args, **kwargs):
self._traced_learn_on_batch = None
self._traced_compute_actions = None
self._traced_compute_gradients = None
self._traced_apply_gradients = None
super(TracedEagerPolicy, self).__init__(*args, **kwargs)
@override(eager_policy_cls)
@convert_eager_inputs
@convert_eager_outputs
def _learn_on_batch_eager(self, samples):
if self._traced_learn_on_batch is None:
self._traced_learn_on_batch = tf.function(
super(TracedEagerPolicy, self)._learn_on_batch_eager,
autograph=False,
experimental_relax_shapes=True)
return self._traced_learn_on_batch(samples)
@override(Policy)
@convert_eager_inputs
@convert_eager_outputs
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs):
obs_batch = tf.convert_to_tensor(obs_batch)
state_batches = _convert_to_tf(state_batches)
prev_action_batch = _convert_to_tf(prev_action_batch)
prev_reward_batch = _convert_to_tf(prev_reward_batch)
if self._traced_compute_actions is None:
self._traced_compute_actions = tf.function(
super(TracedEagerPolicy, self).compute_actions,
autograph=False,
experimental_relax_shapes=True)
return self._traced_compute_actions(
obs_batch, state_batches, prev_action_batch, prev_reward_batch,
info_batch, episodes, explore, timestep, **kwargs)
@override(eager_policy_cls)
@convert_eager_inputs
@convert_eager_outputs
def _compute_gradients_eager(self, samples):
if self._traced_compute_gradients is None:
self._traced_compute_gradients = tf.function(
super(TracedEagerPolicy, self).compute_gradients,
autograph=False,
experimental_relax_shapes=True)
return self._traced_compute_gradients(samples)
@override(Policy)
@convert_eager_inputs
@convert_eager_outputs
def apply_gradients(self, grads):
if self._traced_apply_gradients is None:
self._traced_apply_gradients = tf.function(
super(TracedEagerPolicy, self).apply_gradients,
autograph=False,
experimental_relax_shapes=True)
return self._traced_apply_gradients(grads)
TracedEagerPolicy.__name__ = eager_policy_cls.__name__
TracedEagerPolicy.__qualname__ = eager_policy_cls.__qualname__
return TracedEagerPolicy
def build_eager_tf_policy(
name,
loss_fn,
get_default_config=None,
postprocess_fn=None,
stats_fn=None,
optimizer_fn=None,
compute_gradients_fn=None,
apply_gradients_fn=None,
grad_stats_fn=None,
extra_learn_fetches_fn=None,
extra_action_out_fn=None,
validate_spaces=None,
before_init=None,
before_loss_init=None,
after_init=None,
make_model=None,
action_sampler_fn=None,
action_distribution_fn=None,
mixins=None,
get_batch_divisibility_req=None,
# Deprecated args.
obs_include_prev_action_reward=DEPRECATED_VALUE,
extra_action_fetches_fn=None,
gradients_fn=None,
):
"""Build an eager TF policy.
An eager policy runs all operations in eager mode, which makes debugging
much simpler, but has lower performance.
You shouldn't need to call this directly. Rather, prefer to build a TF
    graph policy and set {"framework": "tfe"} in the trainer config to have
it automatically be converted to an eager policy.
This has the same signature as build_tf_policy()."""
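    # Illustrative trainer-config sketch (keys taken from this module, values
    # are assumptions, not defaults):
    #   config = {
    #       "framework": "tfe",     # run the policy eagerly
    #       "eager_tracing": True,  # wrap methods via traced_eager_policy()
    #   }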
base = add_mixins(Policy, mixins)
if obs_include_prev_action_reward != DEPRECATED_VALUE:
deprecation_warning(old="obs_include_prev_action_reward", error=False)
if extra_action_fetches_fn is not None:
deprecation_warning(
old="extra_action_fetches_fn",
new="extra_action_out_fn",
error=False)
extra_action_out_fn = extra_action_fetches_fn
if gradients_fn is not None:
deprecation_warning(
old="gradients_fn", new="compute_gradients_fn", error=False)
compute_gradients_fn = gradients_fn
class eager_policy_cls(base):
def __init__(self, observation_space, action_space, config):
assert tf.executing_eagerly()
self.framework = config.get("framework", "tfe")
Policy.__init__(self, observation_space, action_space, config)
# Log device and worker index.
from ray.rllib.evaluation.rollout_worker import get_global_worker
worker = get_global_worker()
worker_idx = worker.worker_index if worker else 0
if tf.config.list_physical_devices("GPU"):
logger.info(
"TF-eager Policy (worker={}) running on GPU.".format(
worker_idx if worker_idx > 0 else "local"))
else:
logger.info(
"TF-eager Policy (worker={}) running on CPU.".format(
worker_idx if worker_idx > 0 else "local"))
self._is_training = False
self._loss_initialized = False
self._sess = None
self._loss = loss_fn
self.batch_divisibility_req = get_batch_divisibility_req(self) if \
callable(get_batch_divisibility_req) else \
(get_batch_divisibility_req or 1)
self._max_seq_len = config["model"]["max_seq_len"]
if get_default_config:
config = dict(get_default_config(), **config)
if validate_spaces:
validate_spaces(self, observation_space, action_space, config)
if before_init:
before_init(self, observation_space, action_space, config)
self.config = config
self.dist_class = None
if action_sampler_fn or action_distribution_fn:
if not make_model:
raise ValueError(
"`make_model` is required if `action_sampler_fn` OR "
"`action_distribution_fn` is given")
else:
self.dist_class, logit_dim = ModelCatalog.get_action_dist(
action_space, self.config["model"])
if make_model:
self.model = make_model(self, observation_space, action_space,
config)
else:
self.model = ModelCatalog.get_model_v2(
observation_space,
action_space,
logit_dim,
config["model"],
framework=self.framework,
)
# Lock used for locking some methods on the object-level.
# This prevents possible race conditions when calling the model
# first, then its value function (e.g. in a loss function), in
# between of which another model call is made (e.g. to compute an
# action).
self._lock = threading.RLock()
# Auto-update model's inference view requirements, if recurrent.
self._update_model_view_requirements_from_init_state()
self.exploration = self._create_exploration()
self._state_inputs = self.model.get_initial_state()
self._is_recurrent = len(self._state_inputs) > 0
# Combine view_requirements for Model and Policy.
self.view_requirements.update(self.model.view_requirements)
if before_loss_init:
before_loss_init(self, observation_space, action_space, config)
if optimizer_fn:
optimizers = optimizer_fn(self, config)
else:
optimizers = tf.keras.optimizers.Adam(config["lr"])
optimizers = force_list(optimizers)
if getattr(self, "exploration", None):
optimizers = self.exploration.get_exploration_optimizer(
optimizers)
# TODO: (sven) Allow tf policy to have more than 1 optimizer.
# Just like torch Policy does.
self._optimizer = optimizers[0] if optimizers else None
self._initialize_loss_from_dummy_batch(
auto_remove_unneeded_view_reqs=True,
stats_fn=stats_fn,
)
self._loss_initialized = True
if after_init:
after_init(self, observation_space, action_space, config)
# Got to reset global_timestep again after fake run-throughs.
self.global_timestep = 0
@override(Policy)
def postprocess_trajectory(self,
sample_batch,
other_agent_batches=None,
episode=None):
assert tf.executing_eagerly()
# Call super's postprocess_trajectory first.
sample_batch = Policy.postprocess_trajectory(self, sample_batch)
if postprocess_fn:
return postprocess_fn(self, sample_batch, other_agent_batches,
episode)
return sample_batch
@with_lock
@override(Policy)
def learn_on_batch(self, postprocessed_batch):
# Callback handling.
learn_stats = {}
self.callbacks.on_learn_on_batch(
policy=self,
train_batch=postprocessed_batch,
result=learn_stats)
if not isinstance(postprocessed_batch, SampleBatch) or \
not postprocessed_batch.zero_padded:
pad_batch_to_sequences_of_same_size(
postprocessed_batch,
max_seq_len=self._max_seq_len,
shuffle=False,
batch_divisibility_req=self.batch_divisibility_req,
view_requirements=self.view_requirements,
)
self._is_training = True
postprocessed_batch["is_training"] = True
stats = self._learn_on_batch_eager(postprocessed_batch)
stats.update({"custom_metrics": learn_stats})
return stats
@convert_eager_inputs
@convert_eager_outputs
def _learn_on_batch_eager(self, samples):
with tf.variable_creator_scope(_disallow_var_creation):
grads_and_vars, stats = self._compute_gradients(samples)
self._apply_gradients(grads_and_vars)
return stats
@override(Policy)
def compute_gradients(self, samples):
pad_batch_to_sequences_of_same_size(
samples,
shuffle=False,
max_seq_len=self._max_seq_len,
batch_divisibility_req=self.batch_divisibility_req)
self._is_training = True
samples["is_training"] = True
return self._compute_gradients_eager(samples)
@convert_eager_inputs
@convert_eager_outputs
def _compute_gradients_eager(self, samples):
with tf.variable_creator_scope(_disallow_var_creation):
grads_and_vars, stats = self._compute_gradients(samples)
grads = [g for g, v in grads_and_vars]
return grads, stats
@override(Policy)
def compute_actions(self,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None,
info_batch=None,
episodes=None,
explore=None,
timestep=None,
**kwargs):
self._is_training = False
self._is_recurrent = \
state_batches is not None and state_batches != []
if not tf1.executing_eagerly():
tf1.enable_eager_execution()
input_dict = {
SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_batch),
"is_training": tf.constant(False),
}
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = \
tf.convert_to_tensor(prev_action_batch)
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = \
tf.convert_to_tensor(prev_reward_batch)
return self._compute_action_helper(input_dict, state_batches,
episodes, explore, timestep)
@override(Policy)
def compute_actions_from_input_dict(
self,
input_dict: Dict[str, TensorType],
explore: bool = None,
timestep: Optional[int] = None,
**kwargs
) -> Tuple[TensorType, List[TensorType], Dict[str, TensorType]]:
if not tf1.executing_eagerly():
tf1.enable_eager_execution()
# Pass lazy (eager) tensor dict to Model as `input_dict`.
input_dict = self._lazy_tensor_dict(input_dict)
# Pack internal state inputs into (separate) list.
state_batches = [
input_dict[k] for k in input_dict.keys() if "state_in" in k[:8]
]
return self._compute_action_helper(input_dict, state_batches, None,
explore, timestep)
@with_lock
@convert_eager_inputs
@convert_eager_outputs
def _compute_action_helper(self, input_dict, state_batches, episodes,
explore, timestep):
explore = explore if explore is not None else \
self.config["explore"]
timestep = timestep if timestep is not None else \
self.global_timestep
if isinstance(timestep, tf.Tensor):
timestep = int(timestep.numpy())
self._is_training = False
self._state_in = state_batches or []
# Calculate RNN sequence lengths.
batch_size = input_dict[SampleBatch.CUR_OBS].shape[0]
seq_lens = tf.ones(batch_size, dtype=tf.int32) if state_batches \
else None
# Add default and custom fetches.
extra_fetches = {}
# Use Exploration object.
with tf.variable_creator_scope(_disallow_var_creation):
if action_sampler_fn:
dist_inputs = None
state_out = []
actions, logp = action_sampler_fn(
self,
self.model,
input_dict[SampleBatch.CUR_OBS],
explore=explore,
timestep=timestep,
episodes=episodes)
else:
# Exploration hook before each forward pass.
self.exploration.before_compute_actions(
timestep=timestep, explore=explore)
if action_distribution_fn:
# Try new action_distribution_fn signature, supporting
# state_batches and seq_lens.
try:
dist_inputs, self.dist_class, state_out = \
action_distribution_fn(
self,
self.model,
input_dict=input_dict,
state_batches=state_batches,
seq_lens=seq_lens,
explore=explore,
timestep=timestep,
is_training=False)
# Trying the old way (to stay backward compatible).
# TODO: Remove in future.
except TypeError as e:
if "positional argument" in e.args[0] or \
"unexpected keyword argument" in e.args[0]:
dist_inputs, self.dist_class, state_out = \
action_distribution_fn(
self, self.model,
input_dict[SampleBatch.CUR_OBS],
explore=explore,
timestep=timestep,
is_training=False)
else:
raise e
elif isinstance(self.model, tf.keras.Model):
input_dict = SampleBatch(input_dict, seq_lens=seq_lens)
if state_batches and "state_in_0" not in input_dict:
for i, s in enumerate(state_batches):
input_dict[f"state_in_{i}"] = s
self._lazy_tensor_dict(input_dict)
dist_inputs, state_out, extra_fetches = \
self.model(input_dict)
else:
dist_inputs, state_out = self.model(
input_dict, state_batches, seq_lens)
action_dist = self.dist_class(dist_inputs, self.model)
# Get the exploration action from the forward results.
actions, logp = self.exploration.get_exploration_action(
action_distribution=action_dist,
timestep=timestep,
explore=explore)
# Action-logp and action-prob.
if logp is not None:
extra_fetches[SampleBatch.ACTION_PROB] = tf.exp(logp)
extra_fetches[SampleBatch.ACTION_LOGP] = logp
# Action-dist inputs.
if dist_inputs is not None:
extra_fetches[SampleBatch.ACTION_DIST_INPUTS] = dist_inputs
# Custom extra fetches.
if extra_action_out_fn:
extra_fetches.update(extra_action_out_fn(self))
# Update our global timestep by the batch size.
self.global_timestep += int(batch_size)
return actions, state_out, extra_fetches
@with_lock
@override(Policy)
def compute_log_likelihoods(self,
actions,
obs_batch,
state_batches=None,
prev_action_batch=None,
prev_reward_batch=None):
if action_sampler_fn and action_distribution_fn is None:
raise ValueError("Cannot compute log-prob/likelihood w/o an "
"`action_distribution_fn` and a provided "
"`action_sampler_fn`!")
seq_lens = tf.ones(len(obs_batch), dtype=tf.int32)
input_dict = {
SampleBatch.CUR_OBS: tf.convert_to_tensor(obs_batch),
"is_training": tf.constant(False),
}
if prev_action_batch is not None:
input_dict[SampleBatch.PREV_ACTIONS] = \
tf.convert_to_tensor(prev_action_batch)
if prev_reward_batch is not None:
input_dict[SampleBatch.PREV_REWARDS] = \
tf.convert_to_tensor(prev_reward_batch)
# Exploration hook before each forward pass.
self.exploration.before_compute_actions(explore=False)
# Action dist class and inputs are generated via custom function.
if action_distribution_fn:
dist_inputs, dist_class, _ = action_distribution_fn(
self,
self.model,
input_dict[SampleBatch.CUR_OBS],
explore=False,
is_training=False)
# Default log-likelihood calculation.
else:
dist_inputs, _ = self.model(input_dict, state_batches,
seq_lens)
dist_class = self.dist_class
action_dist = dist_class(dist_inputs, self.model)
log_likelihoods = action_dist.logp(actions)
return log_likelihoods
@override(Policy)
def apply_gradients(self, gradients):
self._apply_gradients(
zip([(tf.convert_to_tensor(g) if g is not None else None)
for g in gradients], self.model.trainable_variables()))
@override(Policy)
def get_exploration_state(self):
return _convert_to_numpy(self.exploration.get_state())
@override(Policy)
def get_weights(self, as_dict=False):
variables = self.variables()
if as_dict:
return {v.name: v.numpy() for v in variables}
return [v.numpy() for v in variables]
@override(Policy)
def set_weights(self, weights):
variables = self.variables()
assert len(weights) == len(variables), (len(weights),
len(variables))
for v, w in zip(variables, weights):
v.assign(w)
@override(Policy)
def get_state(self):
state = super().get_state()
if self._optimizer and \
len(self._optimizer.variables()) > 0:
state["_optimizer_variables"] = \
self._optimizer.variables()
# Add exploration state.
state["_exploration_state"] = self.exploration.get_state()
return state
@override(Policy)
def set_state(self, state):
state = state.copy() # shallow copy
# Set optimizer vars first.
optimizer_vars = state.get("_optimizer_variables", None)
if optimizer_vars and self._optimizer.variables():
logger.warning(
"Cannot restore an optimizer's state for tf eager! Keras "
"is not able to save the v1.x optimizers (from "
"tf.compat.v1.train) since they aren't compatible with "
"checkpoints.")
for opt_var, value in zip(self._optimizer.variables(),
optimizer_vars):
opt_var.assign(value)
# Set exploration's state.
if hasattr(self, "exploration") and "_exploration_state" in state:
self.exploration.set_state(state=state["_exploration_state"])
# Then the Policy's (NN) weights.
super().set_state(state)
def variables(self):
"""Return the list of all savable variables for this policy."""
if isinstance(self.model, tf.keras.Model):
return self.model.variables
else:
return self.model.variables()
@override(Policy)
def is_recurrent(self):
return self._is_recurrent
@override(Policy)
def num_state_tensors(self):
return len(self._state_inputs)
@override(Policy)
def get_initial_state(self):
if hasattr(self, "model"):
return self.model.get_initial_state()
return []
def get_session(self):
return None # None implies eager
def get_placeholder(self, ph):
raise ValueError(
"get_placeholder() is not allowed in eager mode. Try using "
"rllib.utils.tf_ops.make_tf_callable() to write "
"functions that work in both graph and eager mode.")
def loss_initialized(self):
return self._loss_initialized
@override(Policy)
def export_model(self, export_dir):
pass
# TODO: (sven) Deprecate this in favor of `save()`.
@override(Policy)
def export_checkpoint(self, export_dir):
deprecation_warning("export_checkpoint", "save")
def _get_is_training_placeholder(self):
return tf.convert_to_tensor(self._is_training)
def _apply_gradients(self, grads_and_vars):
if apply_gradients_fn:
apply_gradients_fn(self, self._optimizer, grads_and_vars)
else:
self._optimizer.apply_gradients(
[(g, v) for g, v in grads_and_vars if g is not None])
@with_lock
def _compute_gradients(self, samples):
"""Computes and returns grads as eager tensors."""
with tf.GradientTape(persistent=compute_gradients_fn is not None) \
as tape:
loss = loss_fn(self, self.model, self.dist_class, samples)
if isinstance(self.model, tf.keras.Model):
variables = self.model.trainable_variables
else:
variables = self.model.trainable_variables()
if compute_gradients_fn:
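                # OptimizerWrapper adapts the eager GradientTape to the
                # optimizer-style `compute_gradients(loss, var_list)` call
                # that graph-mode compute_gradients_fn implementations expect.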
class OptimizerWrapper:
def __init__(self, tape):
self.tape = tape
def compute_gradients(self, loss, var_list):
return list(
zip(self.tape.gradient(loss, var_list), var_list))
grads_and_vars = compute_gradients_fn(self,
OptimizerWrapper(tape),
loss)
else:
grads_and_vars = list(
zip(tape.gradient(loss, variables), variables))
if log_once("grad_vars"):
for _, v in grads_and_vars:
logger.info("Optimizing variable {}".format(v.name))
grads = [g for g, v in grads_and_vars]
stats = self._stats(self, samples, grads)
return grads_and_vars, stats
def _stats(self, outputs, samples, grads):
fetches = {}
if stats_fn:
fetches[LEARNER_STATS_KEY] = {
k: v
for k, v in stats_fn(outputs, samples).items()
}
else:
fetches[LEARNER_STATS_KEY] = {}
if extra_learn_fetches_fn:
fetches.update(
{k: v
for k, v in extra_learn_fetches_fn(self).items()})
if grad_stats_fn:
fetches.update({
k: v
for k, v in grad_stats_fn(self, samples, grads).items()
})
return fetches
def _lazy_tensor_dict(self, postprocessed_batch: SampleBatch):
# TODO: (sven): Keep for a while to ensure backward compatibility.
if not isinstance(postprocessed_batch, SampleBatch):
postprocessed_batch = SampleBatch(postprocessed_batch)
postprocessed_batch.set_get_interceptor(_convert_to_tf)
return postprocessed_batch
@classmethod
def with_tracing(cls):
return traced_eager_policy(cls)
eager_policy_cls.__name__ = name + "_eager"
eager_policy_cls.__qualname__ = name + "_eager"
return eager_policy_cls
| {
"content_hash": "9b717d742a3022cb524633b04980844b",
"timestamp": "",
"source": "github",
"line_count": 794,
"max_line_length": 79,
"avg_line_length": 39.885390428211586,
"alnum_prop": 0.5285926300167356,
"repo_name": "pcmoritz/ray-1",
"id": "edc6eaafa26e994b2054c2960f81b19cbd4a446f",
"size": "31669",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/policy/eager_tf_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "70670"
},
{
"name": "C++",
"bytes": "4670851"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Dockerfile",
"bytes": "14159"
},
{
"name": "HTML",
"bytes": "30414"
},
{
"name": "Java",
"bytes": "1338604"
},
{
"name": "JavaScript",
"bytes": "914"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "Python",
"bytes": "10523389"
},
{
"name": "Shell",
"bytes": "117557"
},
{
"name": "Smarty",
"bytes": "239"
},
{
"name": "Starlark",
"bytes": "238506"
},
{
"name": "TypeScript",
"bytes": "259269"
}
],
"symlink_target": ""
} |
"""Support for Tile device trackers."""
import logging
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.components.device_tracker.const import SOURCE_TYPE_GPS
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from . import DATA_COORDINATOR, DATA_TILE, DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_ALTITUDE = "altitude"
ATTR_CONNECTION_STATE = "connection_state"
ATTR_IS_DEAD = "is_dead"
ATTR_IS_LOST = "is_lost"
ATTR_LAST_LOST_TIMESTAMP = "last_lost_timestamp"
ATTR_RING_STATE = "ring_state"
ATTR_TILE_NAME = "tile_name"
ATTR_VOIP_STATE = "voip_state"
DEFAULT_ATTRIBUTION = "Data provided by Tile"
DEFAULT_ICON = "mdi:view-grid"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Tile device trackers."""
async_add_entities(
[
TileDeviceTracker(
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][tile_uuid], tile
)
for tile_uuid, tile in hass.data[DOMAIN][DATA_TILE][entry.entry_id].items()
]
)
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Detect a legacy configuration and import it."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: config[CONF_USERNAME],
CONF_PASSWORD: config[CONF_PASSWORD],
},
)
)
_LOGGER.info(
"Your Tile configuration has been imported into the UI; "
"please remove it from configuration.yaml"
)
return True
class TileDeviceTracker(CoordinatorEntity, TrackerEntity):
    """Representation of a Tile device tracker."""
def __init__(self, coordinator, tile):
"""Initialize."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._tile = tile
@property
def available(self):
"""Return if entity is available."""
return self.coordinator.last_update_success and not self._tile.dead
@property
def battery_level(self):
"""Return the battery level of the device.
Percentage from 0-100.
"""
return None
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return DEFAULT_ICON
@property
def location_accuracy(self):
"""Return the location accuracy of the device.
Value in meters.
"""
return self._tile.accuracy
@property
def latitude(self) -> float:
"""Return latitude value of the device."""
return self._tile.latitude
@property
def longitude(self) -> float:
"""Return longitude value of the device."""
return self._tile.longitude
@property
def name(self):
"""Return the name."""
return self._tile.name
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"tile_{self._tile.uuid}"
@property
    def source_type(self):
        """Return the source type, e.g. gps or router, of the device."""
return SOURCE_TYPE_GPS
@callback
def _handle_coordinator_update(self):
"""Respond to a DataUpdateCoordinator update."""
self._update_from_latest_data()
self.async_write_ha_state()
@callback
def _update_from_latest_data(self):
"""Update the entity from the latest data."""
self._attrs.update(
{
ATTR_ALTITUDE: self._tile.altitude,
ATTR_IS_LOST: self._tile.lost,
ATTR_LAST_LOST_TIMESTAMP: self._tile.lost_timestamp,
ATTR_RING_STATE: self._tile.ring_state,
ATTR_VOIP_STATE: self._tile.voip_state,
}
)
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
self._update_from_latest_data()
| {
"content_hash": "9dabd4226b3faafa8d638e77e8866301",
"timestamp": "",
"source": "github",
"line_count": 148,
"max_line_length": 87,
"avg_line_length": 29.37162162162162,
"alnum_prop": 0.6252587991718427,
"repo_name": "partofthething/home-assistant",
"id": "f7cc4e1736e6cd23ef7b81e77a1b91a5837d063a",
"size": "4347",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/components/tile/device_tracker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1720"
},
{
"name": "Python",
"bytes": "31051838"
},
{
"name": "Shell",
"bytes": "4832"
}
],
"symlink_target": ""
} |
import warnings
import numpy as np
from skimage.viewer.qt import QtWidgets, has_qt, FigureManagerQT, FigureCanvasQTAgg
import matplotlib as mpl
from matplotlib.figure import Figure
from matplotlib import _pylab_helpers
from matplotlib.colors import LinearSegmentedColormap
if has_qt and 'agg' not in mpl.get_backend().lower():
warnings.warn("Recommended matplotlib backend is `Agg` for full "
"skimage.viewer functionality.")
__all__ = ['init_qtapp', 'start_qtapp', 'RequiredAttr', 'figimage',
'LinearColormap', 'ClearColormap', 'FigureCanvas', 'new_plot',
'update_axes_image']
QApp = None
def init_qtapp():
    """Initialize QApplication.
    The QApplication needs to be initialized before creating any QWidgets.
"""
global QApp
QApp = QtWidgets.QApplication.instance()
if QApp is None:
QApp = QtWidgets.QApplication([])
return QApp
def is_event_loop_running(app=None):
"""Return True if event loop is running."""
if app is None:
app = init_qtapp()
if hasattr(app, '_in_event_loop'):
return app._in_event_loop
else:
return False
def start_qtapp(app=None):
"""Start Qt mainloop"""
if app is None:
app = init_qtapp()
if not is_event_loop_running(app):
app._in_event_loop = True
app.exec_()
app._in_event_loop = False
else:
app._in_event_loop = True
class RequiredAttr(object):
"""A class attribute that must be set before use."""
instances = dict()
def __init__(self, init_val=None):
self.instances[self, None] = init_val
def __get__(self, obj, objtype):
value = self.instances[self, obj]
if value is None:
raise AttributeError('Required attribute not set')
return value
def __set__(self, obj, value):
self.instances[self, obj] = value
class LinearColormap(LinearSegmentedColormap):
"""LinearSegmentedColormap in which color varies smoothly.
    This class is a simplification of LinearSegmentedColormap that does not
    support jumps in color intensities.
Parameters
----------
name : str
Name of colormap.
segmented_data : dict
Dictionary of 'red', 'green', 'blue', and (optionally) 'alpha' values.
Each color key contains a list of `x`, `y` tuples. `x` must increase
monotonically from 0 to 1 and corresponds to input values for a
mappable object (e.g. an image). `y` corresponds to the color
intensity.
"""
def __init__(self, name, segmented_data, **kwargs):
segmented_data = dict((key, [(x, y, y) for x, y in value])
for key, value in segmented_data.items())
LinearSegmentedColormap.__init__(self, name, segmented_data, **kwargs)
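# Illustrative example (hypothetical values): a plain gray ramp
#   ramp = LinearColormap('gray_ramp', {'red': [(0.0, 0.0), (1.0, 1.0)],
#                                       'green': [(0.0, 0.0), (1.0, 1.0)],
#                                       'blue': [(0.0, 0.0), (1.0, 1.0)]})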
class ClearColormap(LinearColormap):
"""Color map that varies linearly from alpha = 0 to 1
"""
def __init__(self, rgb, max_alpha=1, name='clear_color'):
r, g, b = rgb
cg_speq = {'blue': [(0.0, b), (1.0, b)],
'green': [(0.0, g), (1.0, g)],
'red': [(0.0, r), (1.0, r)],
'alpha': [(0.0, 0.0), (1.0, max_alpha)]}
LinearColormap.__init__(self, name, cg_speq)
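# Illustrative usage (hypothetical overlay array, plain matplotlib call):
#   red_clear = ClearColormap((1.0, 0.0, 0.0), max_alpha=0.7)
#   ax.imshow(overlay, cmap=red_clear)  # red overlay whose opacity tracks intensity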
class FigureCanvas(FigureCanvasQTAgg):
"""Canvas for displaying images."""
def __init__(self, figure, **kwargs):
self.fig = figure
FigureCanvasQTAgg.__init__(self, self.fig)
FigureCanvasQTAgg.setSizePolicy(self,
QtWidgets.QSizePolicy.Expanding,
QtWidgets.QSizePolicy.Expanding)
FigureCanvasQTAgg.updateGeometry(self)
def resizeEvent(self, event):
FigureCanvasQTAgg.resizeEvent(self, event)
# Call to `resize_event` missing in FigureManagerQT.
# See https://github.com/matplotlib/matplotlib/pull/1585
self.resize_event()
def new_canvas(*args, **kwargs):
"""Return a new figure canvas."""
allnums = _pylab_helpers.Gcf.figs.keys()
num = max(allnums) + 1 if allnums else 1
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvas(figure)
fig_manager = FigureManagerQT(canvas, num)
return fig_manager.canvas
def new_plot(parent=None, subplot_kw=None, **fig_kw):
"""Return new figure and axes.
Parameters
----------
parent : QtWidget
Qt widget that displays the plot objects. If None, you must manually
call ``canvas.setParent`` and pass the parent widget.
subplot_kw : dict
        Keyword arguments passed to ``matplotlib.figure.Figure.add_subplot``.
    fig_kw : dict
        Keyword arguments passed to ``matplotlib.figure.Figure``.
"""
if subplot_kw is None:
subplot_kw = {}
canvas = new_canvas(**fig_kw)
canvas.setParent(parent)
fig = canvas.figure
ax = fig.add_subplot(1, 1, 1, **subplot_kw)
return fig, ax
def figimage(image, scale=1, dpi=None, **kwargs):
"""Return figure and axes with figure tightly surrounding image.
Unlike pyplot.figimage, this actually plots onto an axes object, which
fills the figure. Plotting the image onto an axes allows for subsequent
overlays of axes artists.
Parameters
----------
image : array
image to plot
scale : float
If scale is 1, the figure and axes have the same dimension as the
image. Smaller values of `scale` will shrink the figure.
dpi : int
Dots per inch for figure. If None, use the default rcParam.
"""
dpi = dpi if dpi is not None else mpl.rcParams['figure.dpi']
kwargs.setdefault('interpolation', 'nearest')
kwargs.setdefault('cmap', 'gray')
h, w, d = np.atleast_3d(image).shape
figsize = np.array((w, h), dtype=float) / dpi * scale
fig, ax = new_plot(figsize=figsize, dpi=dpi)
fig.subplots_adjust(left=0, bottom=0, right=1, top=1)
ax.set_axis_off()
ax.imshow(image, **kwargs)
ax.figure.canvas.draw()
return fig, ax
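# Illustrative usage (hypothetical `image` array):
#   fig, ax = figimage(image, scale=0.5)  # figure is half the image's pixel size
#   ax.plot([0, 10], [0, 10])             # artists overlay the displayed image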
def update_axes_image(image_axes, image):
"""Update the image displayed by an image plot.
    This sets the image plot's array and updates its shape appropriately.
Parameters
----------
image_axes : `matplotlib.image.AxesImage`
Image axes to update.
image : array
Image array.
"""
image_axes.set_array(image)
# Adjust size if new image shape doesn't match the original
h, w = image.shape[:2]
image_axes.set_extent((0, w, h, 0))
| {
"content_hash": "02c38d04bf713d88ab62f837321e4c3e",
"timestamp": "",
"source": "github",
"line_count": 213,
"max_line_length": 83,
"avg_line_length": 30.84037558685446,
"alnum_prop": 0.6229258639062262,
"repo_name": "newville/scikit-image",
"id": "524e9b6166c4d2dc0cef47c9acb0723bcd64baba",
"size": "6569",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "skimage/viewer/utils/core.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "76670"
},
{
"name": "Makefile",
"bytes": "449"
},
{
"name": "Python",
"bytes": "2158081"
}
],
"symlink_target": ""
} |
import itsdangerous
import mock
from nose.tools import * # flake8: noqa
import unittest
from django.utils import timezone
from tests.base import ApiTestCase
from osf_tests.factories import (
AuthUserFactory
)
from api.base.settings.defaults import API_BASE
from framework.auth.oauth_scopes import public_scopes
from framework.auth.cas import CasResponse
from website import settings
from osf.models import ApiOAuth2PersonalToken, Session
class TestWelcomeToApi(ApiTestCase):
def setUp(self):
super(TestWelcomeToApi, self).setUp()
self.user = AuthUserFactory()
self.url = '/{}'.format(API_BASE)
def tearDown(self):
self.app.reset()
super(TestWelcomeToApi, self).tearDown()
def test_returns_200_for_logged_out_user(self):
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(res.json['meta']['current_user'], None)
def test_returns_current_user_info_when_logged_in(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(
res.json['meta']['current_user']['data']['attributes']['given_name'],
self.user.given_name
)
def test_current_user_accepted_tos(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(
res.json['meta']['current_user']['data']['attributes']['accepted_terms_of_service'],
False
)
self.user.accepted_terms_of_service = timezone.now()
self.user.save()
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_equal(res.content_type, 'application/vnd.api+json')
assert_equal(
res.json['meta']['current_user']['data']['attributes']['accepted_terms_of_service'],
True
)
def test_returns_302_redirect_for_base_url(self):
res = self.app.get('/')
assert_equal(res.status_code, 302)
assert_equal(res.location, '/v2/')
def test_cookie_has_admin(self):
session = Session(data={'auth_user_id': self.user._id})
session.save()
cookie = itsdangerous.Signer(settings.SECRET_KEY).sign(session._id)
self.app.set_cookie(settings.COOKIE_NAME, str(cookie))
res = self.app.get(self.url)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['admin'], True)
def test_basic_auth_does_not_have_admin(self):
res = self.app.get(self.url, auth=self.user.auth)
assert_equal(res.status_code, 200)
assert_not_in('admin', res.json['meta'].keys())
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
# TODO: Remove when available outside of DEV_MODE
@unittest.skipIf(
not settings.DEV_MODE,
'DEV_MODE disabled, osf.admin unavailable'
)
def test_admin_scoped_token_has_admin(self, mock_auth):
token = ApiOAuth2PersonalToken(
owner=self.user,
name='Admin Token',
scopes='osf.admin'
)
mock_cas_resp = CasResponse(
authenticated=True,
user=self.user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s for s in token.scopes.split(' ')]
}
)
mock_auth.return_value = self.user, mock_cas_resp
res = self.app.get(
self.url,
headers={
'Authorization': 'Bearer {}'.format(token.token_id)
}
)
assert_equal(res.status_code, 200)
assert_equal(res.json['meta']['admin'], True)
@mock.patch('api.base.authentication.drf.OSFCASAuthentication.authenticate')
def test_non_admin_scoped_token_does_not_have_admin(self, mock_auth):
token = ApiOAuth2PersonalToken(
owner=self.user,
name='Admin Token',
scopes=' '.join([key for key in public_scopes if key != 'osf.admin'])
)
mock_cas_resp = CasResponse(
authenticated=True,
user=self.user._id,
attributes={
'accessToken': token.token_id,
'accessTokenScope': [s for s in token.scopes.split(' ')]
}
)
mock_auth.return_value = self.user, mock_cas_resp
res = self.app.get(
self.url,
headers={
'Authorization': 'Bearer {}'.format(token.token_id)
}
)
assert_equal(res.status_code, 200)
assert_not_in('admin', res.json['meta'].keys())
| {
"content_hash": "101831291a120101b8af7a3e1acb3be0",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 96,
"avg_line_length": 34.95,
"alnum_prop": 0.6012671162885755,
"repo_name": "icereval/osf.io",
"id": "758263b7ef0df56b2fbcb37330ecec8d548d8c54",
"size": "4917",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api_tests/base/test_root.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "108526"
},
{
"name": "HTML",
"bytes": "261937"
},
{
"name": "JavaScript",
"bytes": "1856123"
},
{
"name": "Mako",
"bytes": "691640"
},
{
"name": "Python",
"bytes": "8331919"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
from baremetal_network_provisioning.common import constants as hp_const
from baremetal_network_provisioning.ml2 import (hp_network_provisioning_driver
as np_drv)
from baremetal_network_provisioning.ml2 import mechanism_hp as hp_mech
import contextlib
import mock
from oslo_config import cfg
from neutron.extensions import portbindings
from neutron.tests import base
CONF = cfg.CONF
class TestHPMechDriver(base.BaseTestCase):
"""Test class for mech driver."""
def setUp(self):
super(TestHPMechDriver, self).setUp()
self.driver = hp_mech.HPMechanismDriver()
self.driver.initialize()
self.driver._load_drivers()
def _get_port_context(self, tenant_id, net_id, vm_id, network):
"""Get port context."""
port = {'device_id': vm_id,
'device_owner': 'compute',
'binding:host_id': 'ironic',
'name': 'test-port',
'tenant_id': tenant_id,
'id': 123456,
'network_id': net_id,
'binding:profile':
{'local_link_information': [{'switch_id': '11:22:33:44:55:66',
'port_id': 'Tengig0/1'}]},
'binding:vnic_type': 'baremetal',
'admin_state_up': True,
}
return FakePortContext(port, port, network)
def _get_network_context(self, tenant_id, net_id, seg_id, shared):
"""Get network context."""
network = {'id': net_id,
'tenant_id': tenant_id,
'name': 'test-net',
'shared': shared}
network_segments = [{'segmentation_id': seg_id}]
return FakeNetworkContext(network, network_segments, network)
def _get_port_dict(self):
"""Get port dict."""
port_dict = {'port':
{'segmentation_id': 1001,
'host_id': 'ironic',
'access_type': hp_const.ACCESS,
'switchports':
[{'port_id': 'Tengig0/1',
'switch_id': '11:22:33:44:55:66'}],
'id': 123456,
'network_id': "net1-id",
'is_lag': False}}
return port_dict
def test_create_port_precommit(self):
"""Test create_port_precommit method."""
fake_port_dict = mock.Mock()
fake_context = mock.Mock()
with contextlib.nested(
mock.patch.object(hp_mech.HPMechanismDriver,
'_is_port_of_interest',
return_value=True),
mock.patch.object(hp_mech.HPMechanismDriver,
'_construct_port',
return_value=fake_port_dict),
mock.patch.object(np_drv.HPNetworkProvisioningDriver,
'create_port',
return_value=None)
) as (is_port, cons_port, c_port):
self.driver.create_port_precommit(fake_context)
is_port.assert_called_with(fake_context)
cons_port.assert_called_with(fake_context)
c_port.assert_called_with(fake_port_dict)
def test_delete_port_precommit(self):
"""Test delete_port_precommit method."""
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
vm_id = 'vm1'
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id,
False)
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
port_id = port_context.current['id']
with contextlib.nested(
mock.patch.object(hp_mech.HPMechanismDriver,
'_get_vnic_type',
return_value=portbindings.VNIC_BAREMETAL),
mock.patch.object(np_drv.HPNetworkProvisioningDriver,
'delete_port',
return_value=None)
) as (vnic_type, d_port):
self.driver.delete_port_precommit(port_context)
vnic_type.assert_called_with(port_context)
d_port.assert_called_with(port_id)
def test__construct_port(self):
"""Test _construct_port method."""
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
vm_id = 'vm1'
fake_port_dict = self._get_port_dict()
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id,
False)
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
port_dict = self.driver._construct_port(port_context, segmentation_id)
self.assertEqual(port_dict, fake_port_dict)
def test__get_binding_profile(self):
"""Test _get_binding_profile method."""
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
vm_id = 'vm1'
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id,
False)
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
fake_profile = {'local_link_information':
[{'switch_id': '11:22:33:44:55:66',
'port_id': 'Tengig0/1'}]}
profile = self.driver._get_binding_profile(port_context)
self.assertEqual(profile, fake_profile)
    def test__get_vnic_type(self):
        """Test _get_vnic_type method."""
tenant_id = 'ten-1'
network_id = 'net1-id'
segmentation_id = 1001
vm_id = 'vm1'
network_context = self._get_network_context(tenant_id,
network_id,
segmentation_id,
False)
port_context = self._get_port_context(tenant_id,
network_id,
vm_id,
network_context)
vnic_type = self.driver._get_vnic_type(port_context)
self.assertEqual(vnic_type, 'baremetal')
class FakeNetworkContext(object):
"""To generate network context for testing purposes only."""
def __init__(self, network, segments=None, original_network=None):
self._network = network
self._original_network = original_network
self._segments = segments
@property
def current(self):
return self._network
@property
def original(self):
return self._original_network
@property
def network_segments(self):
return self._segments
class FakePortContext(object):
"""To generate port context for testing purposes only."""
def __init__(self, port, original_port, network):
self._port = port
self._original_port = original_port
self._network_context = network
@property
def current(self):
return self._port
@property
def original(self):
return self._original_port
@property
def network(self):
return self._network_context
| {
"content_hash": "b754f627701df5edafd3d6a839dd98d1",
"timestamp": "",
"source": "github",
"line_count": 211,
"max_line_length": 78,
"avg_line_length": 38.76777251184834,
"alnum_prop": 0.4799511002444988,
"repo_name": "selvakumars2/baremetal-network-provisioning",
"id": "878122269cc46f38922396b182390f652046dc78",
"size": "8794",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "baremetal_network_provisioning/tests/unit/ml2/test_mechanism_hp.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "205720"
},
{
"name": "Shell",
"bytes": "1887"
}
],
"symlink_target": ""
} |
import logging
import gevent
from ...policy import Policy
from . import models
logger = logging.getLogger(__name__)
class MongoPolicy(Policy):
_name = "mongo"
def search_domain(self, protocol):
return models.Domain.search(protocol)
def search_mynetwork(self, protocol):
return models.Mynetwork.search(protocol)
def search_policy(self, protocol):
return models.Policy.search(protocol)
def search_blacklist(self, protocol):
return models.BlackList.search(protocol)
def search_whitelist(self, protocol):
return models.WhiteList.search(protocol)
def search_greylist(self, key):
return models.GreylistEntry.search_entry(key)
def create_greylist(self, key=None, protocol=None, policy=None):
return models.GreylistEntry.create_entry(key=key, protocol=protocol, policy=policy)
def task_purge_expire(self, run_once=False):
logger.info("Start Expired Purge...")
while True:
gevent.sleep(self.purge_interval)
try:
for_delete = models.query_for_purge()
#for_delete = models.GreylistEntry.objects(expire_time__lt=utils.utcnow())
count = for_delete.count()
if count > 0:
logger.info("purge expire entries : %s" % count)
for_delete.delete()
if run_once:
return
except Exception, err:
logger.error(str(err))
def task_metrics(self):
logger.info("Start Metrics...")
while True:
gevent.sleep(self.metrics_interval)
try:
metric = models.GreylistEntry.last_metrics()
if metric:
models.GreylistMetric(**metric).save()
except Exception, err:
logger.error(str(err))
| {
"content_hash": "cc4537f0e85002a44f5fe715fa3cd6ea",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 91,
"avg_line_length": 30.1875,
"alnum_prop": 0.5797101449275363,
"repo_name": "radical-software/mongrey",
"id": "5805758107c9f655810ddbe04157070dbc95b080",
"size": "1957",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mongrey/storage/mongo/policy.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "755370"
},
{
"name": "HTML",
"bytes": "676261"
},
{
"name": "Python",
"bytes": "353769"
},
{
"name": "Shell",
"bytes": "3450"
}
],
"symlink_target": ""
} |
from collections import namedtuple
from io import StringIO
import click
import errno
import json
import logging
import os
import pathlib
import sys
from .benchmark.codec import JsonEncoder
from .benchmark.compare import RunnerComparator, DEFAULT_THRESHOLD
from .benchmark.runner import CppBenchmarkRunner, JavaBenchmarkRunner
from .compat import _import_pandas
from .lang.cpp import CppCMakeDefinition, CppConfiguration
from .utils.cli import ArrowBool, validate_arrow_sources, add_optional_command
from .utils.lint import linter, python_numpydoc, LintValidationException
from .utils.logger import logger, ctx as log_ctx
from .utils.source import ArrowSources
from .utils.tmpdir import tmpdir
# Set default logging to INFO in command line.
logging.basicConfig(level=logging.INFO)
BOOL = ArrowBool()
@click.group()
@click.option("--debug", type=BOOL, is_flag=True, default=False,
help="Increase logging with debugging output.")
@click.option("--pdb", type=BOOL, is_flag=True, default=False,
help="Invoke pdb on uncaught exception.")
@click.option("-q", "--quiet", type=BOOL, is_flag=True, default=False,
help="Silence executed commands.")
@click.pass_context
def archery(ctx, debug, pdb, quiet):
""" Apache Arrow developer utilities.
See sub-commands help with `archery <cmd> --help`.
"""
# Ensure ctx.obj exists
ctx.ensure_object(dict)
log_ctx.quiet = quiet
if debug:
logger.setLevel(logging.DEBUG)
ctx.debug = debug
if pdb:
import pdb
sys.excepthook = lambda t, v, e: pdb.pm()
build_dir_type = click.Path(dir_okay=True, file_okay=False, resolve_path=True)
# Supported build types
build_type = click.Choice(["debug", "relwithdebinfo", "release"],
case_sensitive=False)
# Supported warn levels
warn_level_type = click.Choice(["everything", "checkin", "production"],
case_sensitive=False)
simd_level = click.Choice(["NONE", "SSE4_2", "AVX2", "AVX512"],
case_sensitive=True)
def cpp_toolchain_options(cmd):
options = [
click.option("--cc", metavar="<compiler>", help="C compiler."),
click.option("--cxx", metavar="<compiler>", help="C++ compiler."),
click.option("--cxx-flags", help="C++ compiler flags."),
click.option("--cpp-package-prefix",
help=("Value to pass for ARROW_PACKAGE_PREFIX and "
"use ARROW_DEPENDENCY_SOURCE=SYSTEM"))
]
return _apply_options(cmd, options)
def java_toolchain_options(cmd):
options = [
click.option("--java-home", metavar="<java_home>",
help="Path to Java Developers Kit."),
click.option("--java-options", help="java compiler options."),
]
return _apply_options(cmd, options)
def _apply_options(cmd, options):
for option in options:
cmd = option(cmd)
return cmd
@archery.command(short_help="Initialize an Arrow C++ build")
@click.option("--src", metavar="<arrow_src>", default=None,
callback=validate_arrow_sources,
help="Specify Arrow source directory")
# toolchain
@cpp_toolchain_options
@click.option("--build-type", default=None, type=build_type,
help="CMake's CMAKE_BUILD_TYPE")
@click.option("--warn-level", default="production", type=warn_level_type,
help="Controls compiler warnings -W(no-)error.")
@click.option("--use-gold-linker", default=True, type=BOOL,
help="Toggles ARROW_USE_LD_GOLD option.")
@click.option("--simd-level", default="DEFAULT", type=simd_level,
help="Toggles ARROW_SIMD_LEVEL option.")
# Tests and benchmarks
@click.option("--with-tests", default=True, type=BOOL,
help="Build with tests.")
@click.option("--with-benchmarks", default=None, type=BOOL,
help="Build with benchmarks.")
@click.option("--with-examples", default=None, type=BOOL,
help="Build with examples.")
@click.option("--with-integration", default=None, type=BOOL,
help="Build with integration test executables.")
# Static checks
@click.option("--use-asan", default=None, type=BOOL,
help="Toggle ARROW_USE_ASAN sanitizer.")
@click.option("--use-tsan", default=None, type=BOOL,
help="Toggle ARROW_USE_TSAN sanitizer.")
@click.option("--use-ubsan", default=None, type=BOOL,
help="Toggle ARROW_USE_UBSAN sanitizer.")
@click.option("--with-fuzzing", default=None, type=BOOL,
help="Toggle ARROW_FUZZING.")
# Components
@click.option("--with-compute", default=None, type=BOOL,
help="Build the Arrow compute module.")
@click.option("--with-csv", default=None, type=BOOL,
help="Build the Arrow CSV parser module.")
@click.option("--with-cuda", default=None, type=BOOL,
help="Build the Arrow CUDA extensions.")
@click.option("--with-dataset", default=None, type=BOOL,
help="Build the Arrow dataset module.")
@click.option("--with-filesystem", default=None, type=BOOL,
help="Build the Arrow filesystem layer.")
@click.option("--with-flight", default=None, type=BOOL,
help="Build with Flight rpc support.")
@click.option("--with-gandiva", default=None, type=BOOL,
help="Build with Gandiva expression compiler support.")
@click.option("--with-hdfs", default=None, type=BOOL,
help="Build the Arrow HDFS bridge.")
@click.option("--with-hiveserver2", default=None, type=BOOL,
              help="Build the HiveServer2 client and Arrow adapter.")
@click.option("--with-ipc", default=None, type=BOOL,
help="Build the Arrow IPC extensions.")
@click.option("--with-json", default=None, type=BOOL,
help="Build the Arrow JSON parser module.")
@click.option("--with-jni", default=None, type=BOOL,
help="Build the Arrow JNI lib.")
@click.option("--with-mimalloc", default=None, type=BOOL,
help="Build the Arrow mimalloc based allocator.")
@click.option("--with-parquet", default=None, type=BOOL,
help="Build with Parquet file support.")
@click.option("--with-plasma", default=None, type=BOOL,
help="Build with Plasma object store support.")
@click.option("--with-python", default=None, type=BOOL,
              help="Build the Arrow CPython extensions.")
@click.option("--with-r", default=None, type=BOOL,
              help="Build the Arrow R extensions. This is not a CMake option; "
"it will toggle required options")
@click.option("--with-s3", default=None, type=BOOL,
help="Build Arrow with S3 support.")
# Compressions
@click.option("--with-brotli", default=None, type=BOOL,
help="Build Arrow with brotli compression.")
@click.option("--with-bz2", default=None, type=BOOL,
help="Build Arrow with bz2 compression.")
@click.option("--with-lz4", default=None, type=BOOL,
help="Build Arrow with lz4 compression.")
@click.option("--with-snappy", default=None, type=BOOL,
help="Build Arrow with snappy compression.")
@click.option("--with-zlib", default=None, type=BOOL,
help="Build Arrow with zlib compression.")
@click.option("--with-zstd", default=None, type=BOOL,
help="Build Arrow with zstd compression.")
# CMake extra feature
@click.option("--cmake-extras", type=str, multiple=True,
help="Extra flags/options to pass to cmake invocation. "
"Can be stacked")
@click.option("--install-prefix", type=str,
help="Destination directory where files are installed. Expand to"
"CMAKE_INSTALL_PREFIX. Defaults to to $CONDA_PREFIX if the"
"variable exists.")
# misc
@click.option("-f", "--force", type=BOOL, is_flag=True, default=False,
help="Delete existing build directory if found.")
@click.option("--targets", type=str, multiple=True,
help="Generator targets to run. Can be stacked.")
@click.argument("build_dir", type=build_dir_type)
@click.pass_context
def build(ctx, src, build_dir, force, targets, **kwargs):
""" Initialize a C++ build directory.
    The build command creates a directory initialized with Arrow's cpp source
    and cmake configuration. It can also optionally invoke the generator to
    test the build (useful in scripts).
    Note that archery will carry the caller environment. It will also not touch
    an existing directory; one must use the `--force` option to remove the
    existing directory.
Examples:
\b
# Initialize build with clang8 and avx2 support in directory `clang8-build`
\b
archery build --cc=clang-8 --cxx=clang++-8 --cxx-flags=-mavx2 clang8-build
\b
    # Build and run tests
archery build --targets=all --targets=test build
"""
# Arrow's cpp cmake configuration
conf = CppConfiguration(**kwargs)
    # This is a closure around the cmake invocation, e.g. calling
    # `cmake_def.build()` yields a directory ready to be run with the
    # generator
cmake_def = CppCMakeDefinition(src.cpp, conf)
# Create build directory
build = cmake_def.build(build_dir, force=force)
for target in targets:
build.run(target)
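# Illustrative sketch (not exercised by the CLI itself): the same two-step
# configure/build flow can be driven programmatically. `arrow_src` stands for
# a validated ArrowSources object and the keyword/option values are made-up
# examples; only the call pattern mirrors the code above.
#
#     conf = CppConfiguration(build_type="release", with_tests=True)
#     cmake_def = CppCMakeDefinition(arrow_src.cpp, conf)
#     build = cmake_def.build("release-build", force=True)
#     build.run("all")
#     build.run("test")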
LintCheck = namedtuple('LintCheck', ('option_name', 'help'))
lint_checks = [
LintCheck('clang-format', "Format C++ files with clang-format."),
LintCheck('clang-tidy', "Lint C++ files with clang-tidy."),
LintCheck('cpplint', "Lint C++ files with cpplint."),
LintCheck('iwyu', "Lint changed C++ files with Include-What-You-Use."),
LintCheck('python',
"Format and lint Python files with autopep8 and flake8."),
LintCheck('numpydoc', "Lint Python files with numpydoc."),
LintCheck('cmake-format', "Format CMake files with cmake-format.py."),
LintCheck('rat',
"Check all sources files for license texts via Apache RAT."),
LintCheck('r', "Lint R files."),
LintCheck('docker', "Lint Dockerfiles with hadolint."),
]
def decorate_lint_command(cmd):
"""
Decorate the lint() command function to add individual per-check options.
"""
for check in lint_checks:
option = click.option("--{0}/--no-{0}".format(check.option_name),
default=None, help=check.help)
cmd = option(cmd)
return cmd
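# For reference, the decoration above expands each entry of `lint_checks`
# into a "--<name>/--no-<name>" flag pair, so an invocation can look like
# (check names taken from the list above; the combination is illustrative):
#
#     archery lint --clang-format --no-cpplint --python
#
# Flags left unset stay None so that "--all" on the command below can enable
# them.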
@archery.command(short_help="Check Arrow source tree for errors")
@click.option("--src", metavar="<arrow_src>", default=None,
callback=validate_arrow_sources,
help="Specify Arrow source directory")
@click.option("--fix", is_flag=True, type=BOOL, default=False,
help="Toggle fixing the lint errors if the linter supports it.")
@click.option("--iwyu_all", is_flag=True, type=BOOL, default=False,
help="Run IWYU on all C++ files if enabled")
@click.option("-a", "--all", is_flag=True, default=False,
help="Enable all checks.")
@decorate_lint_command
@click.pass_context
def lint(ctx, src, fix, iwyu_all, **checks):
if checks.pop('all'):
# "--all" is given => enable all non-selected checks
for k, v in checks.items():
if v is None:
checks[k] = True
if not any(checks.values()):
raise click.UsageError(
"Need to enable at least one lint check (try --help)")
try:
linter(src, fix, iwyu_all=iwyu_all, **checks)
except LintValidationException:
sys.exit(1)
@archery.command(short_help="Lint python docstring with NumpyDoc")
@click.argument('symbols', nargs=-1)
@click.option("--src", metavar="<arrow_src>", default=None,
callback=validate_arrow_sources,
help="Specify Arrow source directory")
@click.option("--allow-rule", "-a", multiple=True,
help="Allow only these rules")
@click.option("--disallow-rule", "-d", multiple=True,
help="Disallow these rules")
def numpydoc(src, symbols, allow_rule, disallow_rule):
"""
    Pass a list of modules or symbols as arguments to restrict the validation.
    By default, all pyarrow modules are validated.
Examples
--------
archery numpydoc pyarrow.dataset
archery numpydoc pyarrow.csv pyarrow.json pyarrow.parquet
archery numpydoc pyarrow.array
"""
disallow_rule = disallow_rule or {'GL01', 'SA01', 'EX01', 'ES01'}
try:
results = python_numpydoc(symbols, allow_rules=allow_rule,
disallow_rules=disallow_rule)
for result in results:
result.ok()
except LintValidationException:
sys.exit(1)
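# Example invocation: restrict validation to one module and override the
# disallowed rules (rule codes taken from the default set above):
#
#     archery numpydoc pyarrow.csv -d GL01 -d SA01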
@archery.group()
@click.pass_context
def benchmark(ctx):
""" Arrow benchmarking.
Use the diff sub-command to benchmark revisions, and/or build directories.
"""
pass
def benchmark_common_options(cmd):
def check_language(ctx, param, value):
if value not in {"cpp", "java"}:
raise click.BadParameter("cpp or java is supported now")
return value
options = [
click.option("--src", metavar="<arrow_src>", show_default=True,
default=None, callback=validate_arrow_sources,
help="Specify Arrow source directory"),
click.option("--preserve", type=BOOL, default=False, show_default=True,
is_flag=True,
help="Preserve workspace for investigation."),
click.option("--output", metavar="<output>",
type=click.File("w", encoding="utf8"), default=None,
help="Capture output result into file."),
click.option("--language", metavar="<lang>", type=str, default="cpp",
show_default=True, callback=check_language,
help="Specify target language for the benchmark"),
click.option("--build-extras", type=str, multiple=True,
help="Extra flags/options to pass to mvn build. "
"Can be stacked. For language=java"),
click.option("--benchmark-extras", type=str, multiple=True,
help="Extra flags/options to pass to mvn benchmark. "
"Can be stacked. For language=java"),
click.option("--cmake-extras", type=str, multiple=True,
help="Extra flags/options to pass to cmake invocation. "
"Can be stacked. For language=cpp")
]
cmd = java_toolchain_options(cmd)
cmd = cpp_toolchain_options(cmd)
return _apply_options(cmd, options)
def benchmark_filter_options(cmd):
options = [
click.option("--suite-filter", metavar="<regex>", show_default=True,
type=str, default=None,
help="Regex filtering benchmark suites."),
click.option("--benchmark-filter", metavar="<regex>",
show_default=True, type=str, default=None,
help="Regex filtering benchmarks.")
]
return _apply_options(cmd, options)
@benchmark.command(name="list", short_help="List benchmark suite")
@click.argument("rev_or_path", metavar="[<rev_or_path>]",
default="WORKSPACE", required=False)
@benchmark_common_options
@click.pass_context
def benchmark_list(ctx, rev_or_path, src, preserve, output, cmake_extras,
java_home, java_options, build_extras, benchmark_extras,
language, **kwargs):
""" List benchmark suite.
"""
with tmpdir(preserve=preserve) as root:
logger.debug("Running benchmark {}".format(rev_or_path))
if language == "cpp":
conf = CppBenchmarkRunner.default_configuration(
cmake_extras=cmake_extras, **kwargs)
runner_base = CppBenchmarkRunner.from_rev_or_path(
src, root, rev_or_path, conf)
elif language == "java":
for key in {'cpp_package_prefix', 'cxx_flags', 'cxx', 'cc'}:
del kwargs[key]
conf = JavaBenchmarkRunner.default_configuration(
java_home=java_home, java_options=java_options,
build_extras=build_extras, benchmark_extras=benchmark_extras,
**kwargs)
runner_base = JavaBenchmarkRunner.from_rev_or_path(
src, root, rev_or_path, conf)
for b in runner_base.list_benchmarks:
click.echo(b, file=output or sys.stdout)
@benchmark.command(name="run", short_help="Run benchmark suite")
@click.argument("rev_or_path", metavar="[<rev_or_path>]",
default="WORKSPACE", required=False)
@benchmark_common_options
@benchmark_filter_options
@click.option("--repetitions", type=int, default=-1,
help=("Number of repetitions of each benchmark. Increasing "
"may improve result precision. "
"[default: 1 for cpp, 5 for java"))
@click.pass_context
def benchmark_run(ctx, rev_or_path, src, preserve, output, cmake_extras,
java_home, java_options, build_extras, benchmark_extras,
language, suite_filter, benchmark_filter, repetitions,
**kwargs):
""" Run benchmark suite.
This command will run the benchmark suite for a single build. This is
used to capture (and/or publish) the results.
The caller can optionally specify a target which is either a git revision
(commit, tag, special values like HEAD) or a cmake build directory.
When a commit is referenced, a local clone of the arrow sources (specified
via --src) is performed and the proper branch is created. This is done in
a temporary directory which can be left intact with the `--preserve` flag.
The special token "WORKSPACE" is reserved to specify the current git
    workspace. This implies that no clone will be performed.
Examples:
\b
# Run the benchmarks on current git workspace
\b
archery benchmark run
\b
    # Run the benchmarks on the previous commit
\b
archery benchmark run HEAD~1
\b
    # Run the benchmarks and capture the results in run.json
\b
archery benchmark run --output=run.json
"""
with tmpdir(preserve=preserve) as root:
logger.debug("Running benchmark {}".format(rev_or_path))
if language == "cpp":
conf = CppBenchmarkRunner.default_configuration(
cmake_extras=cmake_extras, **kwargs)
repetitions = repetitions if repetitions != -1 else 1
runner_base = CppBenchmarkRunner.from_rev_or_path(
src, root, rev_or_path, conf,
repetitions=repetitions,
suite_filter=suite_filter, benchmark_filter=benchmark_filter)
elif language == "java":
for key in {'cpp_package_prefix', 'cxx_flags', 'cxx', 'cc'}:
del kwargs[key]
conf = JavaBenchmarkRunner.default_configuration(
java_home=java_home, java_options=java_options,
build_extras=build_extras, benchmark_extras=benchmark_extras,
**kwargs)
repetitions = repetitions if repetitions != -1 else 5
runner_base = JavaBenchmarkRunner.from_rev_or_path(
src, root, rev_or_path, conf,
repetitions=repetitions,
benchmark_filter=benchmark_filter)
# XXX for some reason, the benchmark runner only does its work
# when asked to JSON-serialize the results, so produce a JSON
# output even when none is requested.
json_out = json.dumps(runner_base, cls=JsonEncoder)
if output is not None:
output.write(json_out)
@benchmark.command(name="diff", short_help="Compare benchmark suites")
@benchmark_common_options
@benchmark_filter_options
@click.option("--threshold", type=float, default=DEFAULT_THRESHOLD,
show_default=True,
help="Regression failure threshold in percentage.")
@click.option("--repetitions", type=int, default=1, show_default=True,
help=("Number of repetitions of each benchmark. Increasing "
"may improve result precision. "
"[default: 1 for cpp, 5 for java"))
@click.option("--no-counters", type=BOOL, default=False, is_flag=True,
help="Hide counters field in diff report.")
@click.argument("contender", metavar="[<contender>",
default=ArrowSources.WORKSPACE, required=False)
@click.argument("baseline", metavar="[<baseline>]]", default="origin/master",
required=False)
@click.pass_context
def benchmark_diff(ctx, src, preserve, output, language, cmake_extras,
suite_filter, benchmark_filter, repetitions, no_counters,
java_home, java_options, build_extras, benchmark_extras,
threshold, contender, baseline, **kwargs):
"""Compare (diff) benchmark runs.
This command acts like git-diff but for benchmark results.
The caller can optionally specify both the contender and the baseline. If
unspecified, the contender will default to the current workspace (like git)
and the baseline will default to master.
Each target (contender or baseline) can either be a git revision
(commit, tag, special values like HEAD) or a cmake build directory. This
    allows comparing git commits, different compilers, and/or compiler
flags.
When a commit is referenced, a local clone of the arrow sources (specified
via --src) is performed and the proper branch is created. This is done in
a temporary directory which can be left intact with the `--preserve` flag.
The special token "WORKSPACE" is reserved to specify the current git
    workspace. This implies that no clone will be performed.
Examples:
\b
# Compare workspace (contender) with master (baseline)
\b
archery benchmark diff
\b
# Compare master (contender) with latest version (baseline)
\b
export LAST=$(git tag -l "apache-arrow-[0-9]*" | sort -rV | head -1)
\b
archery benchmark diff master "$LAST"
\b
# Compare g++7 (contender) with clang++-8 (baseline) builds
\b
archery build --with-benchmarks=true \\
--cxx-flags=-ftree-vectorize \\
--cc=gcc-7 --cxx=g++-7 gcc7-build
\b
archery build --with-benchmarks=true \\
--cxx-flags=-flax-vector-conversions \\
--cc=clang-8 --cxx=clang++-8 clang8-build
\b
archery benchmark diff gcc7-build clang8-build
\b
# Compare default targets but scoped to the suites matching
# `^arrow-compute-aggregate` and benchmarks matching `(Sum|Mean)Kernel`.
\b
archery benchmark diff --suite-filter="^arrow-compute-aggregate" \\
--benchmark-filter="(Sum|Mean)Kernel"
\b
# Capture result in file `result.json`
\b
archery benchmark diff --output=result.json
\b
# Equivalently with no stdout clutter.
archery --quiet benchmark diff > result.json
\b
    # Compare with cached results from `archery benchmark run`
\b
archery benchmark run --output=run.json HEAD~1
\b
# This should not recompute the benchmark from run.json
archery --quiet benchmark diff WORKSPACE run.json > result.json
"""
with tmpdir(preserve=preserve) as root:
logger.debug("Comparing {} (contender) with {} (baseline)"
.format(contender, baseline))
if language == "cpp":
conf = CppBenchmarkRunner.default_configuration(
cmake_extras=cmake_extras, **kwargs)
repetitions = repetitions if repetitions != -1 else 1
runner_cont = CppBenchmarkRunner.from_rev_or_path(
src, root, contender, conf,
repetitions=repetitions,
suite_filter=suite_filter,
benchmark_filter=benchmark_filter)
runner_base = CppBenchmarkRunner.from_rev_or_path(
src, root, baseline, conf,
repetitions=repetitions,
suite_filter=suite_filter,
benchmark_filter=benchmark_filter)
elif language == "java":
for key in {'cpp_package_prefix', 'cxx_flags', 'cxx', 'cc'}:
del kwargs[key]
conf = JavaBenchmarkRunner.default_configuration(
java_home=java_home, java_options=java_options,
build_extras=build_extras, benchmark_extras=benchmark_extras,
**kwargs)
repetitions = repetitions if repetitions != -1 else 5
runner_cont = JavaBenchmarkRunner.from_rev_or_path(
src, root, contender, conf,
repetitions=repetitions,
benchmark_filter=benchmark_filter)
runner_base = JavaBenchmarkRunner.from_rev_or_path(
src, root, baseline, conf,
repetitions=repetitions,
benchmark_filter=benchmark_filter)
runner_comp = RunnerComparator(runner_cont, runner_base, threshold)
# TODO(kszucs): test that the output is properly formatted jsonlines
comparisons_json = _get_comparisons_as_json(runner_comp.comparisons)
ren_counters = language == "java"
formatted = _format_comparisons_with_pandas(comparisons_json,
no_counters, ren_counters)
print(formatted, file=output or sys.stdout)
def _get_comparisons_as_json(comparisons):
buf = StringIO()
for comparator in comparisons:
json.dump(comparator, buf, cls=JsonEncoder)
buf.write("\n")
return buf.getvalue()
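# Each line of the returned string is a single JSON-encoded comparison
# (JSON Lines). The field names below are inferred from the pandas formatting
# helper that follows; the values are illustrative only:
#
#     {"benchmark": "...", "baseline": ..., "contender": ...,
#      "change": "+10.0%", "regression": false, "counters": {...}}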
def _format_comparisons_with_pandas(comparisons_json, no_counters,
ren_counters):
pd = _import_pandas()
df = pd.read_json(StringIO(comparisons_json), lines=True)
# parse change % so we can sort by it
df['change %'] = df.pop('change').str[:-1].map(float)
first_regression = len(df) - df['regression'].sum()
fields = ['benchmark', 'baseline', 'contender', 'change %']
if not no_counters:
fields += ['counters']
df = df[fields]
if ren_counters:
df = df.rename(columns={'counters': 'configurations'})
df = df.sort_values(by='change %', ascending=False)
def labelled(title, df):
if len(df) == 0:
return ''
title += ': ({})'.format(len(df))
df_str = df.to_string(index=False)
bar = '-' * df_str.index('\n')
return '\n'.join([bar, title, bar, df_str])
return '\n\n'.join([labelled('Non-regressions', df[:first_regression]),
labelled('Regressions', df[first_regression:])])
# ----------------------------------------------------------------------
# Integration testing
def _set_default(opt, default):
if opt is None:
return default
return opt
@archery.command(short_help="Execute protocol and Flight integration tests")
@click.option('--with-all', is_flag=True, default=False,
help=('Include all known languages by default '
'in integration tests'))
@click.option('--random-seed', type=int, default=12345,
help="Seed for PRNG when generating test data")
@click.option('--with-cpp', type=bool, default=False,
help='Include C++ in integration tests')
@click.option('--with-csharp', type=bool, default=False,
help='Include C# in integration tests')
@click.option('--with-java', type=bool, default=False,
help='Include Java in integration tests')
@click.option('--with-js', type=bool, default=False,
help='Include JavaScript in integration tests')
@click.option('--with-go', type=bool, default=False,
help='Include Go in integration tests')
@click.option('--with-rust', type=bool, default=False,
help='Include Rust in integration tests',
envvar="ARCHERY_INTEGRATION_WITH_RUST")
@click.option('--write_generated_json', default="",
help='Generate test JSON to indicated path')
@click.option('--run-flight', is_flag=True, default=False,
help='Run Flight integration tests')
@click.option('--debug', is_flag=True, default=False,
help='Run executables in debug mode as relevant')
@click.option('--serial', is_flag=True, default=False,
help='Run tests serially, rather than in parallel')
@click.option('--tempdir', default=None,
help=('Directory to use for writing '
'integration test temporary files'))
@click.option('stop_on_error', '-x', '--stop-on-error',
is_flag=True, default=False,
help='Stop on first error')
@click.option('--gold-dirs', multiple=True,
help="gold integration test file paths")
@click.option('-k', '--match',
help=("Substring for test names to include in run, "
"e.g. -k primitive"))
def integration(with_all=False, random_seed=12345, **args):
from .integration.runner import write_js_test_json, run_all_tests
import numpy as np
# FIXME(bkietz) Include help strings for individual testers.
# For example, CPPTester's ARROW_CPP_EXE_PATH environment variable.
# Make runs involving data generation deterministic
np.random.seed(random_seed)
gen_path = args['write_generated_json']
languages = ['cpp', 'csharp', 'java', 'js', 'go', 'rust']
enabled_languages = 0
for lang in languages:
param = 'with_{}'.format(lang)
if with_all:
args[param] = with_all
if args[param]:
enabled_languages += 1
if gen_path:
try:
os.makedirs(gen_path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
write_js_test_json(gen_path)
else:
if enabled_languages == 0:
raise Exception("Must enable at least 1 language to test")
run_all_tests(**args)
@archery.command()
@click.option('--event-name', '-n', required=True)
@click.option('--event-payload', '-p', type=click.File('r', encoding='utf8'),
default='-', required=True)
@click.option('--arrow-token', envvar='ARROW_GITHUB_TOKEN',
              help='OAuth token for responding to comments in the arrow repo')
def trigger_bot(event_name, event_payload, arrow_token):
from .bot import CommentBot, actions
event_payload = json.loads(event_payload.read())
bot = CommentBot(name='github-actions', handler=actions, token=arrow_token)
bot.handle(event_name, event_payload)
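# Example invocation from a CI job. The payload file name is an assumption
# and the command name as spelled here relies on click's underscore-to-dash
# conversion (verify with `archery --help`):
#
#     archery trigger-bot --event-name issue_comment \
#         --event-payload event.json
#
# The token can also be supplied through the ARROW_GITHUB_TOKEN environment
# variable instead of --arrow-token.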
@archery.group('release')
@click.option("--src", metavar="<arrow_src>", default=None,
callback=validate_arrow_sources,
help="Specify Arrow source directory.")
@click.option("--jira-cache", type=click.Path(), default=None,
help="File path to cache queried JIRA issues per version.")
@click.pass_obj
def release(obj, src, jira_cache):
"""Release releated commands."""
from .release import Jira, CachedJira
jira = Jira()
if jira_cache is not None:
jira = CachedJira(jira_cache, jira=jira)
obj['jira'] = jira
obj['repo'] = src.path
@release.command('curate')
@click.argument('version')
@click.pass_obj
def release_curate(obj, version):
"""Release curation."""
from .release import Release
release = Release.from_jira(version, jira=obj['jira'], repo=obj['repo'])
curation = release.curate()
click.echo(curation.render('console'))
@release.group('changelog')
def release_changelog():
"""Release changelog."""
pass
@release_changelog.command('add')
@click.argument('version')
@click.pass_obj
def release_changelog_add(obj, version):
"""Prepend the changelog with the current release"""
from .release import Release
jira, repo = obj['jira'], obj['repo']
# just handle the current version
release = Release.from_jira(version, jira=jira, repo=repo)
if release.is_released:
        raise ValueError('This version has already been released!')
changelog = release.changelog()
changelog_path = pathlib.Path(repo) / 'CHANGELOG.md'
current_content = changelog_path.read_text()
new_content = changelog.render('markdown') + current_content
changelog_path.write_text(new_content)
click.echo("CHANGELOG.md is updated!")
@release_changelog.command('generate')
@click.argument('version')
@click.argument('output', type=click.File('w', encoding='utf8'), default='-')
@click.pass_obj
def release_changelog_generate(obj, version, output):
"""Generate the changelog of a specific release."""
from .release import Release
jira, repo = obj['jira'], obj['repo']
# just handle the current version
release = Release.from_jira(version, jira=jira, repo=repo)
changelog = release.changelog()
output.write(changelog.render('markdown'))
@release_changelog.command('regenerate')
@click.pass_obj
def release_changelog_regenerate(obj):
"""Regeneretate the whole CHANGELOG.md file"""
from .release import Release
jira, repo = obj['jira'], obj['repo']
changelogs = []
for version in jira.project_versions('ARROW'):
if not version.released:
continue
release = Release.from_jira(version, jira=jira, repo=repo)
click.echo('Querying changelog for version: {}'.format(version))
changelogs.append(release.changelog())
click.echo('Rendering new CHANGELOG.md file...')
changelog_path = pathlib.Path(repo) / 'CHANGELOG.md'
with changelog_path.open('w') as fp:
for cl in changelogs:
fp.write(cl.render('markdown'))
@release.command('cherry-pick')
@click.argument('version')
@click.option('--dry-run/--execute', default=True,
help="Display the git commands instead of executing them.")
@click.option('--recreate/--continue', default=True,
help="Recreate the maintenance branch or only apply unapplied "
"patches.")
@click.pass_obj
def release_cherry_pick(obj, version, dry_run, recreate):
"""
Cherry pick commits.
"""
from .release import Release, MinorRelease, PatchRelease
release = Release.from_jira(version, jira=obj['jira'], repo=obj['repo'])
if not isinstance(release, (MinorRelease, PatchRelease)):
raise click.UsageError('Cherry-pick command only supported for minor '
'and patch releases')
if not dry_run:
release.cherry_pick_commits(recreate_branch=recreate)
click.echo('Executed the following commands:\n')
click.echo(
'git checkout {} -b {}'.format(release.previous.tag, release.branch)
)
for commit in release.commits_to_pick():
click.echo('git cherry-pick {}'.format(commit.hexsha))
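# Example: preview the git commands for a patch release without executing
# them (the version number is illustrative):
#
#     archery release cherry-pick 1.0.1 --dry-run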
@archery.group("linking")
@click.pass_obj
def linking(obj):
"""
Quick and dirty utilities for checking library linkage.
"""
pass
@linking.command("check-dependencies")
@click.argument("paths", nargs=-1)
@click.option("--allow", "-a", "allowed", multiple=True,
help="Name of the allowed libraries")
@click.option("--disallow", "-d", "disallowed", multiple=True,
help="Name of the disallowed libraries")
@click.pass_obj
def linking_check_dependencies(obj, allowed, disallowed, paths):
from .linking import check_dynamic_library_dependencies, DependencyError
allowed, disallowed = set(allowed), set(disallowed)
try:
for path in map(pathlib.Path, paths):
check_dynamic_library_dependencies(path, allowed=allowed,
disallowed=disallowed)
except DependencyError as e:
raise click.ClickException(str(e))
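# Example invocation (library and path names are placeholders):
#
#     archery linking check-dependencies \
#         --allow libarrow --disallow libcrypto \
#         cpp/build/release/libarrow.so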
add_optional_command("docker", module=".docker.cli", function="docker",
parent=archery)
add_optional_command("crossbow", module=".crossbow.cli", function="crossbow",
parent=archery)
if __name__ == "__main__":
archery(obj={})
| {
"content_hash": "9ea3303f4ab54bb63f64935915b3f5a9",
"timestamp": "",
"source": "github",
"line_count": 930,
"max_line_length": 79,
"avg_line_length": 38.755913978494625,
"alnum_prop": 0.6317731598368616,
"repo_name": "laurentgo/arrow",
"id": "d8eeb7bab0ebbaab8f920dee6ce69527cf293c40",
"size": "36829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dev/archery/archery/cli.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "73655"
},
{
"name": "Awk",
"bytes": "3683"
},
{
"name": "Batchfile",
"bytes": "32252"
},
{
"name": "C",
"bytes": "328114"
},
{
"name": "C#",
"bytes": "419434"
},
{
"name": "C++",
"bytes": "7254875"
},
{
"name": "CMake",
"bytes": "401649"
},
{
"name": "CSS",
"bytes": "3946"
},
{
"name": "Dockerfile",
"bytes": "42193"
},
{
"name": "FreeMarker",
"bytes": "2274"
},
{
"name": "Go",
"bytes": "364102"
},
{
"name": "HTML",
"bytes": "23047"
},
{
"name": "Java",
"bytes": "2296962"
},
{
"name": "JavaScript",
"bytes": "84850"
},
{
"name": "Lua",
"bytes": "8741"
},
{
"name": "M4",
"bytes": "8713"
},
{
"name": "MATLAB",
"bytes": "9068"
},
{
"name": "Makefile",
"bytes": "44853"
},
{
"name": "Meson",
"bytes": "36931"
},
{
"name": "Objective-C",
"bytes": "7559"
},
{
"name": "PLpgSQL",
"bytes": "56995"
},
{
"name": "Perl",
"bytes": "3799"
},
{
"name": "Python",
"bytes": "1548489"
},
{
"name": "R",
"bytes": "155922"
},
{
"name": "Ruby",
"bytes": "682150"
},
{
"name": "Rust",
"bytes": "1609482"
},
{
"name": "Shell",
"bytes": "251436"
},
{
"name": "Thrift",
"bytes": "137291"
},
{
"name": "TypeScript",
"bytes": "932690"
}
],
"symlink_target": ""
} |
import pytest
def test_answer_is_served_from_chosen_port(docker_compose, nginxproxy):
r = nginxproxy.get("http://web.nginx-proxy.tld/port")
assert r.status_code == 200
assert "answer from port 90\n" in r.text
| {
"content_hash": "2645b522bca2fcdf2a01095c34655ae4",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 71,
"avg_line_length": 31.857142857142858,
"alnum_prop": 0.7085201793721974,
"repo_name": "itsafire/nginx-proxy",
"id": "3c95ba629c269be8fc793328de904e5445b44320",
"size": "223",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "test/test_multiple-ports/test_VIRTUAL_PORT.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1432"
},
{
"name": "Makefile",
"bytes": "339"
},
{
"name": "Python",
"bytes": "55412"
},
{
"name": "Shell",
"bytes": "10374"
}
],
"symlink_target": ""
} |
import random
from Pyro5.api import expose, oneway
@expose
class GameServer(object):
def __init__(self, engine):
self.engine = engine
def register(self, name, observer):
robot = self.engine.signup_robot(name, observer)
self._pyroDaemon.register(robot) # make the robot a pyro object
return robot
@expose
class RemoteBot(object):
def __init__(self, robot, engine):
self.robot = robot
self.engine = engine
def get_data(self):
return self.robot
def change_direction(self, direction):
self.robot.dx, self.robot.dy = direction
def emote(self, text):
self.robot.emote(text)
def terminate(self):
self.engine.remove_robot(self.robot)
@expose
class LocalGameObserver(object):
def __init__(self, name):
self.name = name
self.robot = None
@oneway
def world_update(self, iteration, world, robotdata):
# change directions randomly
if random.random() > 0.8:
if random.random() >= 0.5:
dx, dy = random.randint(-1, 1), 0
else:
dx, dy = 0, random.randint(-1, 1)
self.robot.change_direction((dx, dy))
def start(self):
self.robot.emote("Here we go!")
def victory(self):
print("[%s] I WON!!!" % self.name)
def death(self, killer):
if killer:
print("[%s] I DIED (%s did it)" % (self.name, killer.name))
else:
print("[%s] I DIED" % self.name)
@expose
class GameObserver(object):
def world_update(self, iteration, world, robotdata):
pass
def start(self):
print("Battle starts!")
def victory(self):
print("I WON!!!")
def death(self, killer):
print("I DIED")
if killer:
print("%s KILLED ME :(" % killer.name)
| {
"content_hash": "1a1ed4e37ce55f4ac603d815a2cd6a84",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 72,
"avg_line_length": 23.858974358974358,
"alnum_prop": 0.5706609349811929,
"repo_name": "irmen/Pyro5",
"id": "a7cdb4f3b9a81a3b0a0ff0077c71a5a2c78ded6d",
"size": "1861",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/robots/remote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1175"
},
{
"name": "Python",
"bytes": "478515"
}
],
"symlink_target": ""
} |
""" Utilities for driving sets of parcel model integration strategies.
Occasionally, a pathological set of input parameters to the parcel model
will really muck up the ODE solver's ability to integrate the model.
In that case, it would be nice to quietly adjust some of the numerical
parameters for the ODE solver and re-submit the job. This module includes a
workhorse function :func:`iterate_runs` which can serve this purpose and can
serve as an example for more complex integration strategies. Alternatively,
:func:`run_model`is a useful shortcut for building/running a model and snagging
its output.
"""
from numpy import empty, nan
from pandas import DataFrame
from .activation import mbn2014, arg2000
from .parcel import ParcelModel
from pyrcel.util import ParcelModelError
def run_model(
V,
initial_aerosols,
T,
P,
dt,
S0=-0.0,
max_steps=1000,
t_end=500.0,
solver="lsoda",
output_fmt="smax",
terminate=False,
solver_kws=None,
model_kws=None,
):
""" Setup and run the parcel model with given solver configuration.
Parameters
----------
V, T, P : float
Updraft speed and parcel initial temperature and pressure.
S0 : float, optional, default 0.0
Initial supersaturation, as a percent. Defaults to 100% relative humidity.
initial_aerosols : array_like of :class:`AerosolSpecies`
Set of aerosol populations contained in the parcel.
dt : float
Solver timestep, in seconds.
max_steps : int, optional, default 1000
Maximum number of steps per solver iteration. Defaults to 1000; setting
excessively high could produce extremely long computation times.
t_end : float, optional, default 500.0
Model time in seconds after which the integration will stop.
solver : string, optional, default 'lsoda'
Alias of which solver to use; see :class:`Integrator` for all options.
output_fmt : string, optional, default 'smax'
Alias indicating which output format to use; see :class:`ParcelModel` for
all options.
solver_kws, model_kws : dicts, optional
Additional arguments/configuration to pass to the numerical integrator or model.
Returns
-------
Smax : (user-defined)
Output from parcel model simulation based on user-specified `output_fmt` argument. See
:class:`ParcelModel` for details.
Raises
------
ParcelModelError
If the model fails to initialize or breaks during runtime.
"""
# Setup kw dicts
if model_kws is None:
model_kws = {}
if solver_kws is None:
solver_kws = {}
if V <= 0:
return 0.0
try:
model = ParcelModel(initial_aerosols, V, T, S0, P, **model_kws)
Smax = model.run(
t_end,
dt,
max_steps,
solver=solver,
output_fmt=output_fmt,
terminate=terminate,
**solver_kws
)
except ParcelModelError:
return None
return Smax
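# Minimal usage sketch. `aerosol` stands for an AerosolSpecies instance built
# elsewhere (not shown here); the numeric values are arbitrary examples.
#
#     smax = run_model(V=0.5, initial_aerosols=[aerosol], T=283., P=85000.,
#                      dt=0.01, max_steps=2000, solver="cvode",
#                      output_fmt="smax")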
def iterate_runs(
V,
initial_aerosols,
T,
P,
S0=-0.0,
dt=0.01,
dt_iters=2,
t_end=500.0,
max_steps=500,
output_fmt="smax",
fail_easy=True,
):
""" Iterate through several different strategies for integrating the parcel model.
As long as `fail_easy` is set to `False`, the strategies this method implements are:
1. **CVODE** with a 10 second time limit and 2000 step limit.
2. **LSODA** with up to `dt_iters` iterations, where the timestep `dt` is
halved each time.
3. **LSODE** with coarse tolerance and the original timestep.
If these strategies all fail, the model will print a statement indicating such
and return either -9999 if `output_fmt` was 'smax', or an empty array or DataFrame
accordingly.
Parameters
----------
V, T, P : float
Updraft speed and parcel initial temperature and pressure.
S0 : float, optional, default 0.0
Initial supersaturation, as a percent. Defaults to 100% relative humidity.
initial_aerosols : array_like of :class:`AerosolSpecies`
Set of aerosol populations contained in the parcel.
dt : float
Solver timestep, in seconds.
dt_iters : int, optional, default 2
Number of times to halve `dt` when attempting **LSODA** solver.
    max_steps : int, optional, default 500
        Maximum number of steps per solver iteration. Defaults to 500; setting
        this excessively high could produce extremely long computation times.
t_end : float, optional, default 500.0
Model time in seconds after which the integration will stop.
    output_fmt : string, optional, default 'smax'
Alias indicating which output format to use; see :class:`ParcelModel` for
all options.
fail_easy : boolean, optional, default `True`
If `True`, then stop after the first strategy (**CVODE**)
Returns
-------
Smax : (user-defined)
        Output from parcel model simulation based on user-specified `output_fmt` argument. See
:class:`ParcelModel` for details.
"""
aerosols = initial_aerosols
if V <= 0:
return 0.0, 0.0, 0.0
# Check that there are actually aerosols to deal with
aerosol_N = [a.distribution.N for a in initial_aerosols]
if len(aerosol_N) == 1:
if aerosol_N[0] < 0.01:
return -9999.0, -9999.0, -9999.0
else:
new_aerosols = []
for i in range(len(aerosol_N)):
if aerosol_N[i] > 0.01:
new_aerosols.append(initial_aerosols[i])
aerosols = new_aerosols[:]
S_max_arg, _, _ = arg2000(V, T, P, aerosols)
S_max_fn, _, _ = mbn2014(V, T, P, aerosols)
dt_orig = dt * 1.0
finished = False
S_max = None
# Strategy 1: Try CVODE with modest tolerances.
print(" Trying CVODE with default tolerance")
S_max = run_model(
V,
aerosols,
T,
P,
dt,
S0=S0,
max_steps=2000,
solver="cvode",
t_end=t_end,
output_fmt=output_fmt,
solver_kws={
"iter": "Newton",
"time_limit": 10.0,
"linear_solver": "DENSE",
},
)
# Strategy 2: Iterate over some increasingly relaxed tolerances for LSODA.
if (S_max is None) and not fail_easy:
while dt > dt_orig / (2 ** dt_iters):
print(
" Trying LSODA, dt = %1.3e, max_steps = %d" % (dt, max_steps)
)
S_max = run_model(
V,
aerosols,
T,
P,
dt,
S0,
max_steps,
solver="lsoda",
t_end=t_end,
)
if not S_max:
dt /= 2.0
print(" Retrying...")
else:
finished = True
break
    # Strategy 3: Last-ditch numerical integration with LSODE. This will
    # likely take a very long time.
if (not finished) and (S_max is None) and (not fail_easy):
print(" Trying LSODE")
S_max = run_model(
V,
aerosols,
T,
P,
dt_orig,
max_steps=1000,
solver="lsode",
t_end=t_end,
S0=S0,
)
# Strategy 4: If all else fails return -9999.
if S_max is None:
if output_fmt == "smax":
S_max = -9999.0
elif output_fmt == "arrays":
S_max = empty([0]), empty([0])
elif output_fmt == "dataframes":
S_max = (
DataFrame(data={"S": [nan]}),
DataFrame(data={"aerosol1": [nan]}),
)
else:
S_max = nan
print(" failed", V, dt)
return S_max, S_max_arg, S_max_fn
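# Usage sketch mirroring run_model above (`aerosol` again stands for an
# AerosolSpecies instance defined elsewhere); the three return values are the
# parcel-model Smax and the arg2000/mbn2014 parameterization estimates:
#
#     smax, smax_arg, smax_mbn = iterate_runs(
#         V=0.5, initial_aerosols=[aerosol], T=283., P=85000., dt=0.01)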
| {
"content_hash": "d5ea8bb6ecee8a210f01140b806324b1",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 94,
"avg_line_length": 31.22222222222222,
"alnum_prop": 0.5874428063040162,
"repo_name": "darothen/parcel_model",
"id": "6157074ab645acffa2de2ba44e252838ff7a6015",
"size": "7868",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "pyrcel/driver.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "146117"
}
],
"symlink_target": ""
} |
from filebeat import BaseTest
import os
import time
"""
Tests for the multiline log messages
"""
class Test(BaseTest):
def test_java_elasticsearch_log(self):
"""
Test that multi lines for java logs works.
It checks that all lines which do not start with [ are append to the last line starting with [
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after"
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/elasticsearch-multiline-log.log"],
source_dir="../files",
target_dir="log")
proc = self.start_beat()
# wait for the "Skipping file" log message
self.wait_until(
lambda: self.output_has(lines=20),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert 20 == len(output)
def test_c_style_log(self):
"""
        Test that multiline support for C style logs works.
It checks that all lines following a line with \\ are appended to the previous line
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="\\\\$",
match="before"
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/multiline-c-log.log"],
source_dir="../files",
target_dir="log")
proc = self.start_beat()
# wait for the "Skipping file" log message
self.wait_until(
lambda: self.output_has(lines=4),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert 4 == len(output)
def test_rabbitmq_multiline_log(self):
"""
Test rabbitmq multiline log
Special about this log file is that it has empty new lines
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^=[A-Z]+",
match="after",
negate="true",
)
logentry = """=ERROR REPORT==== 3-Feb-2016::03:10:32 ===
connection <0.23893.109>, channel 3 - soft error:
{amqp_error,not_found,
"no queue 'bucket-1' in vhost '/'",
'queue.declare'}
"""
os.mkdir(self.working_dir + "/log/")
proc = self.start_beat()
testfile = self.working_dir + "/log/rabbitmq.log"
file = open(testfile, 'w')
iterations = 3
for n in range(0, iterations):
file.write(logentry)
file.close()
# wait for the "Skipping file" log message
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Check that output file has the same number of lines as the log file
assert 3 == len(output)
def test_max_lines(self):
"""
Test the maximum number of lines that is sent by multiline
All further lines are discarded
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
max_lines=3
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/elasticsearch-multiline-log.log"],
source_dir="../files",
target_dir="log")
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=20),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Checks line 3 is sent
assert True == self.log_contains(
"MetaDataMappingService.java:388", "output/filebeat")
# Checks line 4 is not sent anymore
assert False == self.log_contains(
"InternalClusterService.java:388", "output/filebeat")
# Check that output file has the same number of lines as the log file
assert 20 == len(output)
def test_timeout(self):
"""
Test that data is sent after timeout
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
file = open(testfile, 'w', 0)
file.write("[2015] hello world")
file.write("\n")
file.write(" First Line\n")
file.write(" Second Line\n")
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=1),
max_timeout=10)
# Because of the timeout the following two lines should be put together
file.write(" This should not be third\n")
file.write(" This should not be fourth\n")
# This starts a new pattern
file.write("[2016] Hello world\n")
# This line should be appended
file.write(" First line again\n")
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
assert 3 == len(output)
def test_max_bytes(self):
"""
Test the maximum number of bytes that is sent
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
max_bytes=60
)
os.mkdir(self.working_dir + "/log/")
self.copy_files(["logs/elasticsearch-multiline-log.log"],
source_dir="../files",
target_dir="log")
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=20),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output()
# Check that first 60 chars are sent
assert True == self.log_contains("cluster.metadata", "output/filebeat")
        # Check that chars afterwards are not sent
assert False == self.log_contains("Zach", "output/filebeat")
# Check that output file has the same number of lines as the log file
assert 20 == len(output)
def test_close_timeout_with_multiline(self):
"""
Test if multiline events are split up with close_timeout
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
close_timeout="2s",
)
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'w', 0) as file:
file.write("[2015] hello world")
file.write("\n")
file.write(" First Line\n")
file.write(" Second Line\n")
proc = self.start_beat()
# Wait until harvester is closed because of timeout
# This leads to the partial event above to be sent
self.wait_until(
lambda: self.log_contains(
"Closing harvester because close_timeout was reached"),
max_timeout=15)
# Because of the timeout the following two lines should be put together
with open(testfile, 'a', 0) as file:
file.write(" This should not be third\n")
file.write(" This should not be fourth\n")
# This starts a new pattern
file.write("[2016] Hello world\n")
# This line should be appended
file.write(" First line again\n")
self.wait_until(
lambda: self.output_has(lines=3),
max_timeout=10)
proc.check_kill_and_wait()
        # close_timeout must have closed the reader at least once
self.wait_until(
lambda: self.log_contains_count(
"Closing harvester because close_timeout was reached") >= 1,
max_timeout=15)
output = self.read_output()
assert 3 == len(output)
def test_consecutive_newline(self):
"""
        Test if consecutive multiline entries have an effect on multiline handling
"""
self.render_config_template(
path=os.path.abspath(self.working_dir) + "/log/*",
multiline=True,
pattern="^\[",
negate="true",
match="after",
close_timeout="2s",
)
logentry1 = """[2016-09-02 19:54:23 +0000] Started 2016-09-02 19:54:23 +0000 "GET" for /gaq?path=%2FCA%2FFallbrook%2F1845-Acacia-Ln&referer=http%3A%2F%2Fwww.xxxxx.com%2FAcacia%2BLn%2BFallbrook%2BCA%2Baddresses&search_bucket=none&page_controller=v9%2Faddresses&page_action=show at 23.235.47.31
X-Forwarded-For:72.197.227.93, 23.235.47.31
Processing by GoogleAnalyticsController#index as JSON
Parameters: {"path"=>"/CA/Fallbrook/1845-Acacia-Ln", "referer"=>"http://www.xxxx.com/Acacia+Ln+Fallbrook+CA+addresses", "search_bucket"=>"none", "page_controller"=>"v9/addresses", "page_action"=>"show"}
Completed 200 OK in 5ms (Views: 1.9ms)"""
logentry2 = """[2016-09-02 19:54:23 +0000] Started 2016-09-02 19:54:23 +0000 "GET" for /health_check at xxx.xx.44.181
X-Forwarded-For:
SetAdCodeMiddleware.default_ad_code referer
SetAdCodeMiddleware.default_ad_code path /health_check
SetAdCodeMiddleware.default_ad_code route """
os.mkdir(self.working_dir + "/log/")
testfile = self.working_dir + "/log/test.log"
with open(testfile, 'w', 0) as file:
file.write(logentry1 + "\n")
file.write(logentry2 + "\n")
proc = self.start_beat()
self.wait_until(
lambda: self.output_has(lines=2),
max_timeout=10)
proc.check_kill_and_wait()
output = self.read_output_json()
output[0]["message"] = logentry1
output[1]["message"] = logentry2
def test_invalid_config(self):
"""
        Test that filebeat errors if the multiline pattern is missing from the config
"""
self.render_config_template(
path=os.path.abspath(self.working_dir + "/log/") + "*",
multiline=True,
match="after",
)
proc = self.start_beat()
self.wait_until(lambda: self.log_contains("missing required field accessing") == 1)
proc.check_kill_and_wait(exit_code=1)
| {
"content_hash": "96fb2cc765d30d8dfab3ba9d5a2c43c7",
"timestamp": "",
"source": "github",
"line_count": 354,
"max_line_length": 300,
"avg_line_length": 31.44632768361582,
"alnum_prop": 0.554796981674452,
"repo_name": "taitan-org/inflog",
"id": "2bf67391f36d2f27e53618ecf725bf24eb38f60f",
"size": "11132",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "filebeat/tests/system/test_multiline.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "216"
},
{
"name": "Go",
"bytes": "1171204"
},
{
"name": "Makefile",
"bytes": "23899"
},
{
"name": "Python",
"bytes": "250534"
},
{
"name": "Shell",
"bytes": "1141"
}
],
"symlink_target": ""
} |
import argparse
import json
import os
import re
import urllib.request
_REPO_URL = 'https://dl.google.com/dl/android/maven2'
_GROUP_NAME = 'com/android/support'
_MODULE_NAME = 'cardview-v7'
_FILE_EXT = 'aar'
_OVERRIDE_LATEST = None
_PATCH_VERSION = 'cr1'
def do_latest():
if _OVERRIDE_LATEST is not None:
print(_OVERRIDE_LATEST + f'.{_PATCH_VERSION}')
return
maven_metadata_url = '{}/{}/{}/maven-metadata.xml'.format(
_REPO_URL, _GROUP_NAME, _MODULE_NAME)
metadata = urllib.request.urlopen(maven_metadata_url).read().decode(
'utf-8')
# Do not parse xml with the python included parser since it is susceptible
# to maliciously crafted xmls. Only use regular expression parsing to be
# safe. RE should be enough to handle what we need to extract.
match = re.search('<latest>([^<]+)</latest>', metadata)
if match:
latest = match.group(1)
else:
# if no latest info was found just hope the versions are sorted and the
# last one is the latest (as is commonly the case).
latest = re.findall('<version>([^<]+)</version>', metadata)[-1]
print(latest + f'.{_PATCH_VERSION}')
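# For reference, the part of maven-metadata.xml targeted by the regular
# expressions above looks roughly like the following (structure assumed;
# version numbers are examples only):
#
#     <versioning>
#       <latest>28.0.0</latest>
#       <versions>
#         <version>27.1.1</version>
#         <version>28.0.0</version>
#       </versions>
#     </versioning>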
def get_download_url(version):
# Remove the patch version when getting the download url
version_no_patch, patch = version.rsplit('.', 1)
if patch.startswith('cr'):
version = version_no_patch
file_url = '{0}/{1}/{2}/{3}/{2}-{3}.{4}'.format(_REPO_URL, _GROUP_NAME,
_MODULE_NAME, version,
_FILE_EXT)
file_name = file_url.rsplit('/', 1)[-1]
partial_manifest = {
'url': [file_url],
'name': [file_name],
'ext': '.' + _FILE_EXT,
}
print(json.dumps(partial_manifest))
def main():
ap = argparse.ArgumentParser()
sub = ap.add_subparsers()
latest = sub.add_parser('latest')
latest.set_defaults(func=lambda _opts: do_latest())
download = sub.add_parser('get_url')
download.set_defaults(
func=lambda _opts: get_download_url(os.environ['_3PP_VERSION']))
opts = ap.parse_args()
opts.func(opts)
if __name__ == '__main__':
main()
| {
"content_hash": "41213d5af9247549d31a80d3498dc0a9",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 79,
"avg_line_length": 31.52857142857143,
"alnum_prop": 0.594019030357952,
"repo_name": "scheib/chromium",
"id": "7be954e2a54027924b05b8fc34ccb38fd3b78927",
"size": "2496",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "third_party/android_deps/libs/com_android_support_cardview_v7/3pp/fetch.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
import unittest
import re
import threading
import time
from selenium import webdriver
from app import create_app, db
from app.models import User, Role, Post, Permission
class SeleniumTestCase(unittest.TestCase):
client = None
@classmethod
def setUpClass(cls):
        # start Chrome
try:
cls.client = webdriver.Chrome()
except:
pass
if cls.client:
cls.app = create_app('testing')
cls.app_context = cls.app.app_context()
cls.app_context.push()
import logging
logger = logging.getLogger('werkzeug')
logger.setLevel("ERROR")
db.create_all()
Role.insert_roles()
User.generate_fake(100)
Post.generate_fake(100)
admin_role = Role.query.filter_by(permissions=0xff).first()
admin = User(email='[email protected]', username='pamplemouse', password='test', role=admin_role, confirmed=True)
db.session.add(admin)
db.session.commit()
threading.Thread(target=cls.app.run).start()
@classmethod
def tearDownClass(cls):
if cls.client:
cls.client.get('http://localhost:5000/shutdown')
cls.client.stop()
db.drop_all()
db.session.remove()
cls.app_context.pop()
def setUp(self):
if not self.client:
self.skipTest('Web browser not available')
def tearDown(self):
pass
def test_admin_home_page(self):
self.client.get('http://localhost:5000/')
self.assertTrue(re.search('Hello,\s+Stranger!', self.client.page_source))
self.client.find_element_by_link_text('Sign in').click()
self.assertTrue('<h1>Login</h1>' in self.client.page_source)
self.client.find_element_by_name('email').send_keys('[email protected]')
self.client.find_element_by_name('password').send_keys('test')
self.client.find_element_by_name('submit').click()
self.assertTrue(re.search('Hello,\s+pamplemouse!', self.client.page_source))
self.client.find_element_by_link_text('Profile').click()
self.assertTrue('<h1>pamplemouse</h1>' in self.client.page_source)
| {
"content_hash": "632ac9e263914566b9d339b791aae998",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 136,
"avg_line_length": 32.47945205479452,
"alnum_prop": 0.5845634753268663,
"repo_name": "delitamakanda/socialite",
"id": "fb6744a5d2ba9d535c0a4160a9e2d490058b33db",
"size": "2371",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_selenium.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "134"
},
{
"name": "HTML",
"bytes": "50249"
},
{
"name": "JavaScript",
"bytes": "825"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "76668"
}
],
"symlink_target": ""
} |
"""
Cache driver that uses SQLite to store information about cached images
"""
from __future__ import absolute_import
from contextlib import contextmanager
import os
import sqlite3
import stat
import time
from eventlet import sleep
from eventlet import timeout
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import excutils
from glance.common import exception
from glance.i18n import _, _LE, _LI, _LW
from glance.image_cache.drivers import base
LOG = logging.getLogger(__name__)
sqlite_opts = [
cfg.StrOpt('image_cache_sqlite_db', default='cache.db',
help=_('The path to the sqlite file database that will be '
'used for image cache management.')),
]
CONF = cfg.CONF
CONF.register_opts(sqlite_opts)
DEFAULT_SQL_CALL_TIMEOUT = 2
class SqliteConnection(sqlite3.Connection):
"""
SQLite DB Connection handler that plays well with eventlet,
slightly modified from Swift's similar code.
"""
def __init__(self, *args, **kwargs):
self.timeout_seconds = kwargs.get('timeout', DEFAULT_SQL_CALL_TIMEOUT)
kwargs['timeout'] = 0
sqlite3.Connection.__init__(self, *args, **kwargs)
def _timeout(self, call):
with timeout.Timeout(self.timeout_seconds):
while True:
try:
return call()
except sqlite3.OperationalError as e:
if 'locked' not in str(e):
raise
sleep(0.05)
def execute(self, *args, **kwargs):
return self._timeout(lambda: sqlite3.Connection.execute(
self, *args, **kwargs))
def commit(self):
return self._timeout(lambda: sqlite3.Connection.commit(self))
def dict_factory(cur, row):
return {col[0]: row[idx] for idx, col in enumerate(cur.description)}
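# Usage sketch: dict_factory is attached to a cursor as its row_factory (see
# get_cached_images below), so rows come back as plain dicts:
#
#     cur = db.execute("SELECT image_id, hits FROM cached_images")
#     cur.row_factory = dict_factory
#     rows = [r for r in cur]   # [{'image_id': ..., 'hits': ...}, ...]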
class Driver(base.Driver):
"""
    Cache driver that uses a SQLite database to store attributes about
    cached image files.
"""
def configure(self):
"""
Configure the driver to use the stored configuration options
Any store that needs special configuration should implement
this method. If the store was not able to successfully configure
itself, it should raise `exception.BadDriverConfiguration`
"""
super(Driver, self).configure()
# Create the SQLite database that will hold our cache attributes
self.initialize_db()
def initialize_db(self):
db = CONF.image_cache_sqlite_db
self.db_path = os.path.join(self.base_dir, db)
try:
conn = sqlite3.connect(self.db_path, check_same_thread=False,
factory=SqliteConnection)
conn.executescript("""
CREATE TABLE IF NOT EXISTS cached_images (
image_id TEXT PRIMARY KEY,
last_accessed REAL DEFAULT 0.0,
last_modified REAL DEFAULT 0.0,
size INTEGER DEFAULT 0,
hits INTEGER DEFAULT 0,
checksum TEXT
);
""")
conn.close()
except sqlite3.DatabaseError as e:
msg = _("Failed to initialize the image cache database. "
"Got error: %s") % e
LOG.error(msg)
raise exception.BadDriverConfiguration(driver_name='sqlite',
reason=msg)
def get_cache_size(self):
"""
Returns the total size in bytes of the image cache.
"""
sizes = []
for path in self.get_cache_files(self.base_dir):
if path == self.db_path:
continue
file_info = os.stat(path)
sizes.append(file_info[stat.ST_SIZE])
return sum(sizes)
def get_hit_count(self, image_id):
"""
Return the number of hits that an image has.
:param image_id: Opaque image identifier
"""
if not self.is_cached(image_id):
return 0
hits = 0
with self.get_db() as db:
cur = db.execute("""SELECT hits FROM cached_images
WHERE image_id = ?""",
(image_id,))
hits = cur.fetchone()[0]
return hits
def get_cached_images(self):
"""
Returns a list of records about cached images.
"""
LOG.debug("Gathering cached image entries.")
with self.get_db() as db:
cur = db.execute("""SELECT
image_id, hits, last_accessed, last_modified, size
FROM cached_images
ORDER BY image_id""")
cur.row_factory = dict_factory
return [r for r in cur]
def is_cached(self, image_id):
"""
Returns True if the image with the supplied ID has its image
file cached.
:param image_id: Image ID
"""
return os.path.exists(self.get_image_filepath(image_id))
def is_cacheable(self, image_id):
"""
Returns True if the image with the supplied ID can have its
image file cached, False otherwise.
:param image_id: Image ID
"""
# Make sure we're not already cached or caching the image
return not (self.is_cached(image_id) or
self.is_being_cached(image_id))
def is_being_cached(self, image_id):
"""
Returns True if the image with supplied id is currently
in the process of having its image file cached.
:param image_id: Image ID
"""
path = self.get_image_filepath(image_id, 'incomplete')
return os.path.exists(path)
def is_queued(self, image_id):
"""
Returns True if the image identifier is in our cache queue.
:param image_id: Image ID
"""
path = self.get_image_filepath(image_id, 'queue')
return os.path.exists(path)
def delete_all_cached_images(self):
"""
Removes all cached image files and any attributes about the images
"""
deleted = 0
with self.get_db() as db:
for path in self.get_cache_files(self.base_dir):
delete_cached_file(path)
deleted += 1
db.execute("""DELETE FROM cached_images""")
db.commit()
return deleted
def delete_cached_image(self, image_id):
"""
Removes a specific cached image file and any attributes about the image
:param image_id: Image ID
"""
path = self.get_image_filepath(image_id)
with self.get_db() as db:
delete_cached_file(path)
db.execute("""DELETE FROM cached_images WHERE image_id = ?""",
(image_id, ))
db.commit()
def delete_all_queued_images(self):
"""
Removes all queued image files and any attributes about the images
"""
files = [f for f in self.get_cache_files(self.queue_dir)]
for file in files:
os.unlink(file)
return len(files)
def delete_queued_image(self, image_id):
"""
Removes a specific queued image file and any attributes about the image
:param image_id: Image ID
"""
path = self.get_image_filepath(image_id, 'queue')
if os.path.exists(path):
os.unlink(path)
def clean(self, stall_time=None):
"""
Delete any image files in the invalid directory and any
files in the incomplete directory that are older than a
configurable amount of time.
"""
self.delete_invalid_files()
if stall_time is None:
stall_time = CONF.image_cache_stall_time
now = time.time()
older_than = now - stall_time
self.delete_stalled_files(older_than)
def get_least_recently_accessed(self):
"""
Return a tuple containing the image_id and size of the least recently
accessed cached file, or None if no cached files.
"""
with self.get_db() as db:
cur = db.execute("""SELECT image_id FROM cached_images
ORDER BY last_accessed LIMIT 1""")
try:
image_id = cur.fetchone()[0]
except TypeError:
# There are no more cached images
return None
path = self.get_image_filepath(image_id)
try:
file_info = os.stat(path)
size = file_info[stat.ST_SIZE]
except OSError:
size = 0
return image_id, size
@contextmanager
def open_for_write(self, image_id):
"""
Open a file for writing the image file for an image
with supplied identifier.
:param image_id: Image ID
"""
incomplete_path = self.get_image_filepath(image_id, 'incomplete')
def commit():
with self.get_db() as db:
final_path = self.get_image_filepath(image_id)
LOG.debug("Fetch finished, moving "
"'%(incomplete_path)s' to '%(final_path)s'",
dict(incomplete_path=incomplete_path,
final_path=final_path))
os.rename(incomplete_path, final_path)
# Make sure that we "pop" the image from the queue...
if self.is_queued(image_id):
os.unlink(self.get_image_filepath(image_id, 'queue'))
filesize = os.path.getsize(final_path)
now = time.time()
db.execute("""INSERT INTO cached_images
(image_id, last_accessed, last_modified, hits, size)
VALUES (?, ?, ?, 0, ?)""",
(image_id, now, now, filesize))
db.commit()
def rollback(e):
with self.get_db() as db:
if os.path.exists(incomplete_path):
invalid_path = self.get_image_filepath(image_id, 'invalid')
LOG.warn(_LW("Fetch of cache file failed (%(e)s), rolling "
"back by moving '%(incomplete_path)s' to "
"'%(invalid_path)s'") %
{'e': e,
'incomplete_path': incomplete_path,
'invalid_path': invalid_path})
os.rename(incomplete_path, invalid_path)
db.execute("""DELETE FROM cached_images
WHERE image_id = ?""", (image_id, ))
db.commit()
try:
with open(incomplete_path, 'wb') as cache_file:
yield cache_file
except Exception as e:
with excutils.save_and_reraise_exception():
rollback(e)
else:
commit()
finally:
# if the generator filling the cache file neither raises an
# exception, nor completes fetching all data, neither rollback
# nor commit will have been called, so the incomplete file
# will persist - in that case remove it as it is unusable
# example: ^c from client fetch
if os.path.exists(incomplete_path):
rollback('incomplete fetch')
@contextmanager
def open_for_read(self, image_id):
"""
Open and yield file for reading the image file for an image
with supplied identifier.
:param image_id: Image ID
"""
path = self.get_image_filepath(image_id)
with open(path, 'rb') as cache_file:
yield cache_file
now = time.time()
with self.get_db() as db:
db.execute("""UPDATE cached_images
SET hits = hits + 1, last_accessed = ?
WHERE image_id = ?""",
(now, image_id))
db.commit()
@contextmanager
def get_db(self):
"""
Returns a context manager that produces a database connection that
self-closes and calls rollback if an error occurs while using the
database connection
"""
conn = sqlite3.connect(self.db_path, check_same_thread=False,
factory=SqliteConnection)
conn.row_factory = sqlite3.Row
conn.text_factory = str
conn.execute('PRAGMA synchronous = NORMAL')
conn.execute('PRAGMA count_changes = OFF')
conn.execute('PRAGMA temp_store = MEMORY')
try:
yield conn
except sqlite3.DatabaseError as e:
msg = _LE("Error executing SQLite call. Got error: %s") % e
LOG.error(msg)
conn.rollback()
finally:
conn.close()
def queue_image(self, image_id):
"""
        This adds an image to be cached to the queue.
        If the image already exists in the queue or has already been
        cached, we return False; True otherwise.
:param image_id: Image ID
"""
if self.is_cached(image_id):
LOG.info(_LI("Not queueing image '%s'. Already cached."), image_id)
return False
if self.is_being_cached(image_id):
LOG.info(_LI("Not queueing image '%s'. Already being "
"written to cache"), image_id)
return False
if self.is_queued(image_id):
LOG.info(_LI("Not queueing image '%s'. Already queued."), image_id)
return False
path = self.get_image_filepath(image_id, 'queue')
# Touch the file to add it to the queue
with open(path, "w"):
pass
return True
def delete_invalid_files(self):
"""
Removes any invalid cache entries
"""
for path in self.get_cache_files(self.invalid_dir):
os.unlink(path)
LOG.info(_LI("Removed invalid cache file %s"), path)
def delete_stalled_files(self, older_than):
"""
Removes any incomplete cache entries older than a
supplied modified time.
:param older_than: Files written to on or before this timestamp
will be deleted.
"""
for path in self.get_cache_files(self.incomplete_dir):
if os.path.getmtime(path) < older_than:
try:
os.unlink(path)
LOG.info(_LI("Removed stalled cache file %s"), path)
except Exception as e:
                    msg = _LW("Failed to delete file %(path)s. "
                              "Got error: %(e)s") % dict(path=path, e=e)
                    LOG.warn(msg)
def get_queued_images(self):
"""
Returns a list of image IDs that are in the queue. The
list should be sorted by the time the image ID was inserted
into the queue.
"""
files = [f for f in self.get_cache_files(self.queue_dir)]
items = []
for path in files:
mtime = os.path.getmtime(path)
items.append((mtime, os.path.basename(path)))
items.sort()
return [image_id for (modtime, image_id) in items]
def get_cache_files(self, basepath):
"""
Returns cache files in the supplied directory
:param basepath: Directory to look in for cache files
"""
for fname in os.listdir(basepath):
path = os.path.join(basepath, fname)
if path != self.db_path and os.path.isfile(path):
yield path
def delete_cached_file(path):
if os.path.exists(path):
LOG.debug("Deleting image cache file '%s'", path)
os.unlink(path)
else:
LOG.warn(_LW("Cached image file '%s' doesn't exist, unable to"
" delete") % path)
| {
"content_hash": "121bf7ee76e5185034ac9417ab31407e",
"timestamp": "",
"source": "github",
"line_count": 474,
"max_line_length": 79,
"avg_line_length": 33.78902953586498,
"alnum_prop": 0.5427072927072927,
"repo_name": "klmitch/glance",
"id": "e30b59efa51cbcb99df37c1dd66f43ebeb8dc8e2",
"size": "16652",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "glance/image_cache/drivers/sqlite.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "4140950"
},
{
"name": "Shell",
"bytes": "7753"
}
],
"symlink_target": ""
} |
"""
pycodeexport is a Python package for code generation.
"""
from __future__ import absolute_import, division, print_function
from ._release import __version__
from .dist import PCEExtension, pce_build_ext
| {
"content_hash": "7b72865ca5d08fd178ba2cbba0e133ff",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 64,
"avg_line_length": 29.714285714285715,
"alnum_prop": 0.7548076923076923,
"repo_name": "bjodah/pycodeexport",
"id": "f057ae9f07354f42e3d19eb168ec6f18b1fed964",
"size": "232",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pycodeexport/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "30796"
},
{
"name": "Shell",
"bytes": "8982"
}
],
"symlink_target": ""
} |
__author__ = 'techkid6'
def handle_request(request):
    """ Allows for modification of requests from proxy clients"""
    return None
def handle_response(response):
    """ Allows for modification of responses returned to proxy clients"""
    return None | {
"content_hash": "ab8243b8be203d38c1b104d545a5a8fe",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 69,
"avg_line_length": 27.666666666666668,
"alnum_prop": 0.7349397590361446,
"repo_name": "techkid6/pyproxy",
"id": "f650b8abe7018f4a9bbaf23d7211c98b2bb80898",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugins/demo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14010"
}
],
"symlink_target": ""
} |
import uuid
from architect.commands import partition
from django.db import migrations, models
def add_partitions(apps, schema_editor):
partition.run({'module': 'casexml.apps.phone.models'})
class Migration(migrations.Migration):
dependencies = [
('phone', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='SyncLogSQL',
fields=[
('synclog_id', models.UUIDField(unique=True, primary_key=True, default=uuid.uuid1().hex)),
('domain', models.CharField(db_index=True, null=True, blank=True, default=None, max_length=255)),
('user_id', models.CharField(db_index=True, default=None, max_length=255)),
('date', models.DateTimeField(db_index=True, null=True, blank=True)),
('previous_synclog_id', models.UUIDField(blank=True, default=None, max_length=255, null=True)),
('doc', models.JSONField()),
('log_format', models.CharField(
choices=[('legacy', 'legacy'), ('simplified', 'simplified'), ('livequery', 'livequery')],
default='legacy', max_length=10)),
('build_id', models.CharField(max_length=255, null=True, blank=True)),
('duration', models.PositiveIntegerField(null=True, blank=True)),
('last_submitted', models.DateTimeField(db_index=True, null=True, blank=True)),
('had_state_error', models.BooleanField(default=False)),
('error_date', models.DateTimeField(db_index=True, null=True, blank=True)),
('error_hash', models.CharField(max_length=255, null=True, blank=True)),
],
),
migrations.RunPython(add_partitions),
]
| {
"content_hash": "e239634b8b3e89b75d02cb679e3924ba",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 113,
"avg_line_length": 45.43589743589744,
"alnum_prop": 0.59255079006772,
"repo_name": "dimagi/commcare-hq",
"id": "5d20b11f8a96abbfb702f3d6cf2714870f856915",
"size": "1822",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "corehq/ex-submodules/casexml/apps/phone/migrations/0002_synclogsql.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
"""
Tests for dataset creation
"""
import random
import math
import unittest
import os
import numpy as np
import deepchem as dc
try:
import torch # noqa
PYTORCH_IMPORT_FAILED = False
except ImportError:
PYTORCH_IMPORT_FAILED = True
def load_solubility_data():
"""Loads solubility dataset"""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
input_file = os.path.join(current_dir, "../../models/tests/example.csv")
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
return loader.create_dataset(input_file)
def load_multitask_data():
"""Load example multitask data."""
current_dir = os.path.dirname(os.path.abspath(__file__))
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = [
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
]
input_file = os.path.join(current_dir,
"../../models/tests/multitask_example.csv")
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
return loader.create_dataset(input_file)
class TestTransformer(dc.trans.Transformer):
def transform_array(self, X, y, w, ids):
return (2 * X, 1.5 * y, w, ids)
def test_transform_disk():
"""Test that the transform() method works for DiskDatasets."""
dataset = load_solubility_data()
X = dataset.X
y = dataset.y
w = dataset.w
ids = dataset.ids
# Transform it
transformer = TestTransformer(transform_X=True, transform_y=True)
for parallel in (True, False):
transformed = dataset.transform(transformer, parallel=parallel)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2 * X, transformed.X)
np.testing.assert_array_equal(1.5 * y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_sparsify_and_densify():
"""Test that sparsify and densify work as inverses."""
# Test on identity matrix
num_samples = 10
num_features = num_samples
X = np.eye(num_samples)
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Generate random sparse features dataset
np.random.seed(123)
p = .05
X = np.random.binomial(1, p, size=(num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
# Test edge case with array of all zeros
X = np.zeros((num_samples, num_features))
X_sparse = dc.data.sparsify_features(X)
X_reconstructed = dc.data.densify_features(X_sparse, num_features)
np.testing.assert_array_equal(X, X_reconstructed)
def test_pad_features():
"""Test that pad_features pads features correctly."""
batch_size = 100
num_features = 10
# Test cases where n_samples < 2*n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
X_out = dc.data.pad_features(batch_size, X_b)
assert len(X_out) == batch_size
def test_pad_batches():
"""Test that pad_batch pads batches correctly."""
batch_size = 100
num_features = 10
num_tasks = 5
# Test cases where n_samples < 2*n_samples < batch_size
n_samples = 29
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test cases where n_samples < batch_size
n_samples = 79
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case where n_samples == batch_size
n_samples = 100
X_b = np.zeros((n_samples, num_features))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for object featurization.
n_samples = 2
X_b = np.array([{"a": 1}, {"b": 2}])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case for more complicated object featurization
n_samples = 2
X_b = np.array([(1, {"a": 1}), (2, {"b": 2})])
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
# Test case with multidimensional data
n_samples = 50
num_atoms = 15
d = 3
X_b = np.zeros((n_samples, num_atoms, d))
y_b = np.zeros((n_samples, num_tasks))
w_b = np.zeros((n_samples, num_tasks))
ids_b = np.zeros((n_samples,))
X_out, y_out, w_out, ids_out = dc.data.pad_batch(batch_size, X_b, y_b, w_b,
ids_b)
assert len(X_out) == len(y_out) == len(w_out) == len(ids_out) == batch_size
def test_get_task_names():
"""Test that get_task_names returns correct task_names"""
solubility_dataset = load_solubility_data()
assert solubility_dataset.get_task_names() == ["log-solubility"]
multitask_dataset = load_multitask_data()
assert sorted(multitask_dataset.get_task_names()) == sorted([
"task0", "task1", "task2", "task3", "task4", "task5", "task6", "task7",
"task8", "task9", "task10", "task11", "task12", "task13", "task14",
"task15", "task16"
])
def test_get_data_shape():
  """Test that get_data_shape returns the correct data shape"""
solubility_dataset = load_solubility_data()
assert solubility_dataset.get_data_shape() == (1024,)
multitask_dataset = load_multitask_data()
assert multitask_dataset.get_data_shape() == (1024,)
def test_len():
"""Test that len(dataset) works."""
solubility_dataset = load_solubility_data()
assert len(solubility_dataset) == 10
def test_reshard():
"""Test that resharding the dataset works."""
solubility_dataset = load_solubility_data()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 1
solubility_dataset.reshard(shard_size=1)
assert solubility_dataset.get_shard_size() == 1
X_r, y_r, w_r, ids_r = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
assert solubility_dataset.get_number_shards() == 10
solubility_dataset.reshard(shard_size=10)
assert solubility_dataset.get_shard_size() == 10
X_rr, y_rr, w_rr, ids_rr = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
# Test first resharding worked
np.testing.assert_array_equal(X, X_r)
np.testing.assert_array_equal(y, y_r)
np.testing.assert_array_equal(w, w_r)
np.testing.assert_array_equal(ids, ids_r)
# Test second resharding worked
np.testing.assert_array_equal(X, X_rr)
np.testing.assert_array_equal(y, y_rr)
np.testing.assert_array_equal(w, w_rr)
np.testing.assert_array_equal(ids, ids_rr)
def test_complete_shuffle():
shard_sizes = [1, 2, 3, 4, 5]
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
res = dataset.complete_shuffle()
# approx 1/15! chance of equality
np.testing.assert_equal(np.any(np.not_equal(dataset.X, res.X)), True)
  np.testing.assert_equal(np.any(np.not_equal(dataset.y, res.y)), True)
  np.testing.assert_equal(np.any(np.not_equal(dataset.w, res.w)), True)
np.testing.assert_equal(np.any(np.not_equal(dataset.ids, res.ids)), True)
np.testing.assert_array_equal(
np.sort(dataset.X, axis=0), np.sort(res.X, axis=0))
np.testing.assert_array_equal(
np.sort(dataset.y, axis=0), np.sort(res.y, axis=0))
np.testing.assert_array_equal(
np.sort(dataset.w, axis=0), np.sort(res.w, axis=0))
np.testing.assert_array_equal(np.sort(dataset.ids), np.sort(res.ids))
def test_iterbatches():
"""Test that iterating over batches of data works."""
solubility_dataset = load_solubility_data()
batch_size = 2
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
for (X_b, y_b, w_b, ids_b) in solubility_dataset.iterbatches(batch_size):
assert X_b.shape == (batch_size,) + data_shape
assert y_b.shape == (batch_size,) + (len(tasks),)
assert w_b.shape == (batch_size,) + (len(tasks),)
assert ids_b.shape == (batch_size,)
def test_itersamples_numpy():
"""Test that iterating over samples in a NumpyDataset works."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
for i, (sx, sy, sw, sid) in enumerate(dataset.itersamples()):
np.testing.assert_array_equal(sx, X[i])
np.testing.assert_array_equal(sy, y[i])
np.testing.assert_array_equal(sw, w[i])
np.testing.assert_array_equal(sid, ids[i])
def test_itersamples_disk():
"""Test that iterating over samples in a DiskDataset works."""
solubility_dataset = load_solubility_data()
X = solubility_dataset.X
y = solubility_dataset.y
w = solubility_dataset.w
ids = solubility_dataset.ids
for i, (sx, sy, sw, sid) in enumerate(solubility_dataset.itersamples()):
np.testing.assert_array_equal(sx, X[i])
np.testing.assert_array_equal(sy, y[i])
np.testing.assert_array_equal(sw, w[i])
np.testing.assert_array_equal(sid, ids[i])
def test_transform_numpy():
"""Test that the transform() method works for NumpyDatasets."""
num_datapoints = 100
num_features = 10
num_tasks = 10
# Generate data
X = np.random.rand(num_datapoints, num_features)
y = np.random.randint(2, size=(num_datapoints, num_tasks))
w = np.random.randint(2, size=(num_datapoints, num_tasks))
ids = np.array(["id"] * num_datapoints)
dataset = dc.data.NumpyDataset(X, y, w, ids)
# Transform it
transformer = TestTransformer(transform_X=True, transform_y=True)
transformed = dataset.transform(transformer)
np.testing.assert_array_equal(X, dataset.X)
np.testing.assert_array_equal(y, dataset.y)
np.testing.assert_array_equal(w, dataset.w)
np.testing.assert_array_equal(ids, dataset.ids)
np.testing.assert_array_equal(2 * X, transformed.X)
np.testing.assert_array_equal(1.5 * y, transformed.y)
np.testing.assert_array_equal(w, transformed.w)
np.testing.assert_array_equal(ids, transformed.ids)
def test_to_numpy():
"""Test that transformation to numpy arrays is sensible."""
solubility_dataset = load_solubility_data()
data_shape = solubility_dataset.get_data_shape()
tasks = solubility_dataset.get_task_names()
X, y, w, ids = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
N_samples = len(solubility_dataset)
N_tasks = len(tasks)
assert X.shape == (N_samples,) + data_shape
assert y.shape == (N_samples, N_tasks)
assert w.shape == (N_samples, N_tasks)
assert ids.shape == (N_samples,)
def test_consistent_ordering():
"""Test that ordering of labels is consistent over time."""
solubility_dataset = load_solubility_data()
ids1 = solubility_dataset.ids
ids2 = solubility_dataset.ids
assert np.array_equal(ids1, ids2)
def test_get_statistics():
"""Test statistics computation of this dataset."""
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
X_means, y_means = np.mean(X, axis=0), np.mean(y, axis=0)
X_stds, y_stds = np.std(X, axis=0), np.std(y, axis=0)
comp_X_means, comp_X_stds, comp_y_means, comp_y_stds = \
solubility_dataset.get_statistics()
np.testing.assert_allclose(comp_X_means, X_means)
np.testing.assert_allclose(comp_y_means, y_means)
np.testing.assert_allclose(comp_X_stds, X_stds)
np.testing.assert_allclose(comp_y_stds, y_stds)
def test_disk_iterate_batch_size():
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
batch_sizes = []
for X, y, _, _ in solubility_dataset.iterbatches(
3, epochs=2, pad_batches=False, deterministic=True):
batch_sizes.append(len(X))
assert [3, 3, 3, 1, 3, 3, 3, 1] == batch_sizes
def test_disk_pad_batches():
shard_sizes = [21, 11, 41, 21, 51]
batch_size = 10
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ys = np.concatenate(all_ys, axis=0)
all_ws = np.concatenate(all_ws, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(
batch_size=batch_size, pad_batches=True, deterministic=True)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
total_size = sum(shard_sizes)
assert bidx == math.ceil(total_size / batch_size) - 1
expected_batches = math.ceil(total_size / batch_size) * batch_size
assert len(test_Xs) == expected_batches
assert len(test_ys) == expected_batches
assert len(test_ws) == expected_batches
assert len(test_ids) == expected_batches
np.testing.assert_array_equal(all_Xs, test_Xs[:total_size, :])
np.testing.assert_array_equal(all_ys, test_ys[:total_size, :])
np.testing.assert_array_equal(all_ws, test_ws[:total_size, :])
np.testing.assert_array_equal(all_ids, test_ids[:total_size])
def test_disk_iterate_y_w_None():
shard_sizes = [21, 11, 41, 21, 51]
batch_size = 10
all_Xs, all_ids = [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ids.append(ids_b)
yield X_b, None, None, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
test_Xs, test_ids = [], []
for bidx, (a, _, _, d) in enumerate(
dataset.iterbatches(
batch_size=batch_size, pad_batches=True, deterministic=True)):
test_Xs.append(a)
test_ids.append(d)
test_Xs = np.concatenate(test_Xs, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
total_size = sum(shard_sizes)
assert bidx == math.ceil(total_size / batch_size) - 1
expected_batches = math.ceil(total_size / batch_size) * batch_size
assert len(test_Xs) == expected_batches
assert len(test_ids) == expected_batches
np.testing.assert_array_equal(all_Xs, test_Xs[:total_size, :])
np.testing.assert_array_equal(all_ids, test_ids[:total_size])
def test_disk_iterate_batch():
all_batch_sizes = [None, 32, 17, 11]
all_shard_sizes = [[7, 3, 12, 4, 5], [1, 1, 1, 1, 1], [31, 31, 31, 31, 31],
[21, 11, 41, 21, 51]]
for idx in range(25):
shard_length = random.randint(1, 32)
shard_sizes = []
for _ in range(shard_length):
shard_sizes.append(random.randint(1, 128))
all_shard_sizes.append(shard_sizes)
if idx == 0:
# special case to test
all_batch_sizes.append(None)
else:
all_batch_sizes.append(random.randint(1, 256))
for shard_sizes, batch_size in zip(all_shard_sizes, all_batch_sizes):
all_Xs, all_ys, all_ws, all_ids = [], [], [], []
def shard_generator():
for sz in shard_sizes:
X_b = np.random.rand(sz, 1)
y_b = np.random.rand(sz, 1)
w_b = np.random.rand(sz, 1)
ids_b = np.random.rand(sz)
all_Xs.append(X_b)
all_ys.append(y_b)
all_ws.append(w_b)
all_ids.append(ids_b)
yield X_b, y_b, w_b, ids_b
dataset = dc.data.DiskDataset.create_dataset(shard_generator())
all_Xs = np.concatenate(all_Xs, axis=0)
all_ys = np.concatenate(all_ys, axis=0)
all_ws = np.concatenate(all_ws, axis=0)
all_ids = np.concatenate(all_ids, axis=0)
total_size = sum(shard_sizes)
assert dataset.X.shape[0] == total_size
# deterministic
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(
batch_size=batch_size, pad_batches=False, deterministic=True)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
if batch_size is None:
for idx, (tx, ty, tw, tids) in enumerate(
zip(test_Xs, test_ys, test_ws, test_ids)):
assert len(tx) == shard_sizes[idx]
assert len(ty) == shard_sizes[idx]
assert len(tw) == shard_sizes[idx]
assert len(tids) == shard_sizes[idx]
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
if batch_size is None:
assert bidx == len(shard_sizes) - 1
else:
assert bidx == math.ceil(total_size / batch_size) - 1
np.testing.assert_array_equal(all_Xs, test_Xs)
np.testing.assert_array_equal(all_ys, test_ys)
np.testing.assert_array_equal(all_ws, test_ws)
np.testing.assert_array_equal(all_ids, test_ids)
# non-deterministic
test_Xs, test_ys, test_ws, test_ids = [], [], [], []
for bidx, (a, b, c, d) in enumerate(
dataset.iterbatches(
batch_size=batch_size, pad_batches=False, deterministic=False)):
test_Xs.append(a)
test_ys.append(b)
test_ws.append(c)
test_ids.append(d)
    # we don't know the order in which the shards are iterated.
test_Xs = np.concatenate(test_Xs, axis=0)
test_ys = np.concatenate(test_ys, axis=0)
test_ws = np.concatenate(test_ws, axis=0)
test_ids = np.concatenate(test_ids, axis=0)
if batch_size is None:
assert bidx == len(shard_sizes) - 1
else:
assert bidx == math.ceil(total_size / batch_size) - 1
np.testing.assert_array_equal(
np.sort(all_Xs, axis=0), np.sort(test_Xs, axis=0))
np.testing.assert_array_equal(
np.sort(all_ys, axis=0), np.sort(test_ys, axis=0))
np.testing.assert_array_equal(
np.sort(all_ws, axis=0), np.sort(test_ws, axis=0))
np.testing.assert_array_equal(
np.sort(all_ids, axis=0), np.sort(test_ids, axis=0))
def test_merge():
"""Test that dataset merge works."""
num_datapoints = 10
num_features = 10
num_tasks = 1
num_datasets = 4
datasets = []
for i in range(num_datasets):
Xi = np.random.rand(num_datapoints, num_features)
yi = np.random.randint(2, size=(num_datapoints, num_tasks))
wi = np.ones((num_datapoints, num_tasks))
idsi = np.array(["id"] * num_datapoints)
dataseti = dc.data.DiskDataset.from_numpy(Xi, yi, wi, idsi)
datasets.append(dataseti)
new_data = dc.data.datasets.DiskDataset.merge(datasets)
# Check that we have all the data in
assert new_data.X.shape == (num_datapoints * num_datasets, num_features)
assert new_data.y.shape == (num_datapoints * num_datasets, num_tasks)
assert len(new_data.tasks) == len(datasets[0].tasks)
def test_make_tf_dataset():
"""Test creating a Tensorflow Iterator from a Dataset."""
X = np.random.random((100, 5))
y = np.random.random((100, 1))
dataset = dc.data.NumpyDataset(X, y)
iterator = dataset.make_tf_dataset(
batch_size=10, epochs=2, deterministic=True)
for i, (batch_X, batch_y, batch_w) in enumerate(iterator):
offset = (i % 10) * 10
np.testing.assert_array_equal(X[offset:offset + 10, :], batch_X)
np.testing.assert_array_equal(y[offset:offset + 10, :], batch_y)
np.testing.assert_array_equal(np.ones((10, 1)), batch_w)
assert i == 19
def _validate_pytorch_dataset(dataset):
X = dataset.X
y = dataset.y
w = dataset.w
ids = dataset.ids
n_samples = X.shape[0]
# Test iterating in order.
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=True)
for i, (iter_X, iter_y, iter_w, iter_id) in enumerate(ds):
j = i % n_samples
np.testing.assert_array_equal(X[j, :], iter_X)
np.testing.assert_array_equal(y[j, :], iter_y)
np.testing.assert_array_equal(w[j, :], iter_w)
assert ids[j] == iter_id
assert i == 2 * n_samples - 1
# Test iterating out of order.
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=False)
id_to_index = dict((id, i) for i, id in enumerate(ids))
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in ds:
j = id_to_index[iter_id]
np.testing.assert_array_equal(X[j, :], iter_X)
np.testing.assert_array_equal(y[j, :], iter_y)
np.testing.assert_array_equal(w[j, :], iter_w)
id_count[iter_id] += 1
assert all(id_count[id] == 2 for id in ids)
# Test iterating in batches.
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=False, batch_size=7)
id_to_index = dict((id, i) for i, id in enumerate(ids))
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in ds:
size = len(iter_id)
assert size <= 7
for i in range(size):
j = id_to_index[iter_id[i]]
np.testing.assert_array_equal(X[j, :], iter_X[i])
np.testing.assert_array_equal(y[j, :], iter_y[i])
np.testing.assert_array_equal(w[j, :], iter_w[i])
id_count[iter_id[i]] += 1
assert all(id_count[id] == 2 for id in ids)
# Test iterating with multiple workers.
import torch # noqa
ds = dataset.make_pytorch_dataset(epochs=2, deterministic=False)
loader = torch.utils.data.DataLoader(ds, num_workers=3)
id_count = dict((id, 0) for id in ids)
for iter_X, iter_y, iter_w, iter_id in loader:
j = id_to_index[iter_id[0]]
np.testing.assert_array_equal(X[j, :], iter_X[0])
np.testing.assert_array_equal(y[j, :], iter_y[0])
np.testing.assert_array_equal(w[j, :], iter_w[0])
id_count[iter_id[0]] += 1
assert all(id_count[id] == 2 for id in ids)
def test_dataframe():
"""Test converting between Datasets and DataFrames."""
dataset = load_solubility_data()
# A round trip from Dataset to DataFrame to Dataset should produce identical arrays.
df = dataset.to_dataframe()
dataset2 = dc.data.Dataset.from_dataframe(df)
np.testing.assert_array_equal(dataset.X, dataset2.X)
np.testing.assert_array_equal(dataset.y, dataset2.y)
np.testing.assert_array_equal(dataset.w, dataset2.w)
np.testing.assert_array_equal(dataset.ids, dataset2.ids)
# Try specifying particular columns.
dataset3 = dc.data.Dataset.from_dataframe(
df, X=['X2', 'X4'], y='w', w=['y', 'X1'])
np.testing.assert_array_equal(dataset.X[:, (1, 3)], dataset3.X)
np.testing.assert_array_equal(dataset.w, dataset3.y)
np.testing.assert_array_equal(
np.stack([dataset.y[:, 0], dataset.X[:, 0]], axis=1), dataset3.w)
def test_to_str():
  """Tests the string representation of Dataset."""
dataset = dc.data.NumpyDataset(
X=np.random.rand(5, 3), y=np.random.rand(5,), ids=np.arange(5))
ref_str = '<NumpyDataset X.shape: (5, 3), y.shape: (5,), w.shape: (5,), ids: [0 1 2 3 4], task_names: [0]>'
assert str(dataset) == ref_str
# Test id shrinkage
dc.utils.set_print_threshold(10)
dataset = dc.data.NumpyDataset(
X=np.random.rand(50, 3), y=np.random.rand(50,), ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50,), w.shape: (50,), ids: [0 1 2 ... 47 48 49], task_names: [0]>'
assert str(dataset) == ref_str
# Test task shrinkage
dataset = dc.data.NumpyDataset(
X=np.random.rand(50, 3), y=np.random.rand(50, 20), ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50, 20), w.shape: (50, 1), ids: [0 1 2 ... 47 48 49], task_names: [ 0 1 2 ... 17 18 19]>'
assert str(dataset) == ref_str
# Test max print size
dc.utils.set_max_print_size(25)
dataset = dc.data.NumpyDataset(
X=np.random.rand(50, 3), y=np.random.rand(50,), ids=np.arange(50))
ref_str = '<NumpyDataset X.shape: (50, 3), y.shape: (50,), w.shape: (50,), task_names: [0]>'
assert str(dataset) == ref_str
class TestDatasets(unittest.TestCase):
"""
Test basic top-level API for dataset objects.
"""
def test_numpy_iterate_batch_size(self):
solubility_dataset = load_solubility_data()
X, y, _, _ = (solubility_dataset.X, solubility_dataset.y,
solubility_dataset.w, solubility_dataset.ids)
solubility_dataset = dc.data.NumpyDataset.from_DiskDataset(
solubility_dataset)
batch_sizes = []
for X, y, _, _ in solubility_dataset.iterbatches(
3, epochs=2, pad_batches=False, deterministic=True):
batch_sizes.append(len(X))
self.assertEqual([3, 3, 3, 1, 3, 3, 3, 1], batch_sizes)
@unittest.skipIf(PYTORCH_IMPORT_FAILED, 'PyTorch is not installed')
def test_make_pytorch_dataset_from_numpy(self):
"""Test creating a PyTorch Dataset from a NumpyDataset."""
X = np.random.random((100, 5))
y = np.random.random((100, 1))
ids = [str(i) for i in range(100)]
dataset = dc.data.NumpyDataset(X, y, ids=ids)
_validate_pytorch_dataset(dataset)
@unittest.skipIf(PYTORCH_IMPORT_FAILED, 'PyTorch is not installed')
def test_make_pytorch_dataset_from_images(self):
"""Test creating a PyTorch Dataset from an ImageDataset."""
path = os.path.join(os.path.dirname(__file__), 'images')
files = [os.path.join(path, f) for f in os.listdir(path)]
y = np.random.random((10, 1))
ids = [str(i) for i in range(len(files))]
dataset = dc.data.ImageDataset(files, y, ids=ids)
_validate_pytorch_dataset(dataset)
@unittest.skipIf(PYTORCH_IMPORT_FAILED, 'PyTorch is not installed')
def test_make_pytorch_dataset_from_disk(self):
"""Test creating a PyTorch Dataset from a DiskDataset."""
dataset = load_solubility_data()
_validate_pytorch_dataset(dataset)
| {
"content_hash": "f3675e6460b4d5fe20611457d876ea74",
"timestamp": "",
"source": "github",
"line_count": 846,
"max_line_length": 146,
"avg_line_length": 34.45390070921986,
"alnum_prop": 0.6424454508027995,
"repo_name": "lilleswing/deepchem",
"id": "711529e2143ea7618f3e005a559b4f71d078f365",
"size": "29148",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deepchem/data/tests/test_datasets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "16453"
},
{
"name": "Dockerfile",
"bytes": "794"
},
{
"name": "HTML",
"bytes": "20618"
},
{
"name": "Jupyter Notebook",
"bytes": "59756"
},
{
"name": "Python",
"bytes": "2597968"
},
{
"name": "Shell",
"bytes": "11491"
}
],
"symlink_target": ""
} |
import logging
from django.utils import timezone
from quickstats.models import Widget
logger = logging.getLogger(__name__)
def quick_record(owner, value, **kwargs):
labels = kwargs.pop("labels", {})
defaults = kwargs.setdefault("defaults", {})
defaults.setdefault("type", "chart")
defaults.setdefault("value", value)
    # If we get a metric, we want to both set it
    # as one of our labels and default it to
    # the title we use for our widget
if "metric" in kwargs:
metric = kwargs.pop("metric")
labels.setdefault("__name__", metric)
defaults.setdefault("title", metric)
if "timestamp" in kwargs:
timestamp = kwargs.pop("timestamp")
defaults.setdefault("timestamp", timestamp)
else:
timestamp = defaults.setdefault("timestamp", timezone.now())
widget, _ = Widget.objects.lookup_or_create(owner=owner, labels=labels, **kwargs)
sample = widget.sample_set.create(timestamp=timestamp, value=value)
logger.debug("Created sample %r" % sample)
return sample
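def _example_usage(owner):
    # Hypothetical illustration, not part of quickstats: records one sample for
    # ``owner`` via quick_record() above. The metric name, value and label are
    # made-up placeholders; quick_record fills in the widget title, chart type
    # and timestamp defaults itself.
    return quick_record(owner, 42.0, metric="disk.free", labels={"host": "web01"})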
| {
"content_hash": "fb73951745b84979a412a802c0f09c6c",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 85,
"avg_line_length": 32.21212121212121,
"alnum_prop": 0.6698024459078081,
"repo_name": "kfdm/django-simplestats",
"id": "fecaeed496706b2bc426e571ff8629325ff811e2",
"size": "1063",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quickstats/shortcuts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "563"
},
{
"name": "HTML",
"bytes": "22307"
},
{
"name": "Makefile",
"bytes": "177"
},
{
"name": "Python",
"bytes": "90170"
}
],
"symlink_target": ""
} |
""" Demo: `page_a` and `page_b` are rendered into `page_all`.
Note: both `page_a` and `page_b` extend the same `master`; the inheritance
chains intersect at the `content` placeholder.
"""
import unittest
from wheezy.template.engine import Engine
from wheezy.template.ext.core import CoreExtension
from wheezy.template.loader import DictLoader
master = """
@def content():
@end
@content()
"""
page_a = """
@extends('master')
@def content():
a
@end
"""
page_b = """
@extends('master')
@def content():
b
@end
"""
page_all = """
@_r('page_a', ctx, {}, {})
@_r('page_b', ctx, {}, {})
"""
pages = {
'master': master,
'page_a': page_a,
'page_b': page_b,
'page_all': page_all
}
engine = Engine(
loader=DictLoader(pages),
extensions=[CoreExtension()]
)
class TestCase(unittest.TestCase):
def test_render(self):
template = engine.get_template('page_all')
r = template.render({})
self.assertEqual(['a', 'b'], r.split())
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "7b8484970bb9a2b1d997ea6d806507a7",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 70,
"avg_line_length": 16.03125,
"alnum_prop": 0.6003898635477583,
"repo_name": "ezotrank/wheezy.template",
"id": "43886634f1a2454e5e619390bb2232f885090c27",
"size": "1026",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demos/include/render.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "636"
},
{
"name": "Makefile",
"bytes": "2504"
},
{
"name": "Python",
"bytes": "114858"
},
{
"name": "Shell",
"bytes": "212"
},
{
"name": "VimL",
"bytes": "758"
}
],
"symlink_target": ""
} |
from neo4jrestclient.client import GraphDatabase
import getpass
def flatten_dict(dictToFlatten):
#Pass through for now. Will flatten later (because neo4j doesn't like embedded dictionaries)
return dictToFlatten
class Neo4jLoader(object):
def __init__(self, url, username='neo4j', password=None):
if(not password):
password=getpass.getpass()
self.gdb = GraphDatabase(url, username=username, password=password)
def add_vertex(self, vertex):
properties = flatten_dict(vertex.properties)
jprops = str(properties).replace("{'","{").replace("':",":").replace(", '",", ")
queryString = "MERGE (a:%s {hashcode:'%s'}) ON CREATE SET a=%s ON MATCH SET a+=%s"%(vertex.label, vertex.properties['hashcode'], jprops, jprops)
return self.gdb.query(queryString)
def add_edge(self, edge):
properties = flatten_dict(edge.properties)
jprops = str(properties).replace("{'","{").replace("':",":").replace(", '",", ")
queryString = "MATCH (src:%s {hashcode:'%s'}), (dst:%s {hashcode:'%s'}) MERGE (src)-[:%s %s]->(dst)"%(edge.srcVertex.label, edge.srcVertex.id, edge.dstVertex.label, edge.dstVertex.id, edge.relationship, jprops)
return self.gdb.query(queryString)
def load_from_model(self, model):
for vid in model.vertices:
self.add_vertex(model.vertices[vid])
for eid in model.edges:
self.add_edge(model.edges[eid])
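def _example_load(url, model):
    # Illustrative sketch, not part of this module: ``model`` is assumed to be
    # an object exposing ``vertices`` and ``edges`` dicts as expected by
    # load_from_model(); the URL and credentials are placeholders.
    loader = Neo4jLoader(url, username='neo4j', password='secret')
    loader.load_from_model(model)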
| {
"content_hash": "136c9bc05f8c7c2790565cf3b56319ab",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 218,
"avg_line_length": 47.03225806451613,
"alnum_prop": 0.6433470507544582,
"repo_name": "chroniccrash/c4rtographer",
"id": "439e3d5d6ccb8306d933439cf1e671f84e659aeb",
"size": "1458",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neo4jloader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "21720"
}
],
"symlink_target": ""
} |
import gtk
import viewer
reload(viewer)
class Application(gtk.Window):
def __init__(self, appname, size):
gtk.gdk.threads_init()
super(Application, self).__init__()
self.appname = appname
self.size = size
self.extensions = {}
def parse_framework(self, parent, frm):
self.extensions[frm.name] = viewer.get_view[frm.type](frm)
for ext in frm.subExtension:
self.extensions[ext.name] = viewer.get_view[ext.type](ext)
self.extensions[frm.name].attach(self.extensions[ext.name].widget, ext)
for view in frm.subFramework:
self.parse_framework(self.extensions[frm.name], view)
parent.attach(self.extensions[frm.name], frm)
def attach(self, widget, extension):
self.add(widget)
def start(self, main_framework):
self.parse_framework(self, main_framework)
self.set_size_request(int(self.size[0]), int(self.size[1]))
self.set_position(gtk.WIN_POS_CENTER)
self.set_title(self.appname)
# self.set_icon_from_file(icon)
self.connect("destroy", self.destroy)
self.show_all()
gtk.threads_enter()
gtk.main()
gtk.threads_leave()
def destroy(self, widget):
gtk.main_quit()
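def _example_run(main_framework):
    # Illustrative sketch only: ``main_framework`` is assumed to expose the
    # ``name``/``type``/``subExtension``/``subFramework`` attributes that
    # parse_framework() reads; the title "WebViewer" and the window size are
    # placeholders.
    app = Application("WebViewer", (800, 600))
    app.start(main_framework)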
| {
"content_hash": "cf222c062abbf4026495ba6958d0c63e",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 83,
"avg_line_length": 32.87179487179487,
"alnum_prop": 0.6185647425897036,
"repo_name": "2000jedi/WebViewer",
"id": "89442522004139b98f5eff66fe987c70fac99a08",
"size": "1282",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Python/application.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "11659"
}
],
"symlink_target": ""
} |
from .grammar import SchemaGrammar
class PostgresSchemaGrammar(SchemaGrammar):
_modifiers = ['increment', 'nullable', 'default']
_serials = ['big_integer', 'integer',
'medium_integer', 'small_integer', 'tiny_integer']
marker = '%s'
def compile_rename_column(self, blueprint, command, connection):
"""
Compile a rename column command.
:param blueprint: The blueprint
:type blueprint: Blueprint
:param command: The command
:type command: Fluent
:param connection: The connection
:type connection: orator.connections.Connection
:rtype: list
"""
table = self.get_table_prefix() + blueprint.get_table()
column = self.wrap(command.from_)
return 'ALTER TABLE %s RENAME COLUMN %s TO %s'\
% (table, column, self.wrap(command.to))
def compile_table_exists(self):
"""
Compile the query to determine if a table exists
:rtype: str
"""
return 'SELECT * ' \
'FROM information_schema.tables ' \
'WHERE table_name = %(marker)s' \
% {'marker': self.get_marker()}
def compile_column_exists(self, table):
"""
Compile the query to determine the list of columns.
"""
return 'SELECT column_name ' \
'FROM information_schema.columns ' \
'WHERE table_name = \'%s\'' % table
def compile_create(self, blueprint, command, _):
"""
Compile a create table command.
"""
columns = ', '.join(self._get_columns(blueprint))
return 'CREATE TABLE %s (%s)' % (self.wrap_table(blueprint), columns)
def compile_add(self, blueprint, command, _):
table = self.wrap_table(blueprint)
columns = self.prefix_list('ADD COLUMN', self._get_columns(blueprint))
return 'ALTER TABLE %s %s' % (table, ', '.join(columns))
def compile_primary(self, blueprint, command, _):
columns = self.columnize(command.columns)
return 'ALTER TABLE %s ADD PRIMARY KEY (%s)'\
% (self.wrap_table(blueprint), columns)
def compile_unique(self, blueprint, command, _):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s ADD CONSTRAINT %s UNIQUE (%s)'\
% (table, command.index, columns)
def compile_index(self, blueprint, command, _):
columns = self.columnize(command.columns)
table = self.wrap_table(blueprint)
return 'CREATE INDEX %s ON %s (%s)' % (command.index, table, columns)
def compile_drop(self, blueprint, command, _):
return 'DROP TABLE %s' % self.wrap_table(blueprint)
def compile_drop_if_exists(self, blueprint, command, _):
return 'DROP TABLE IF EXISTS %s' % self.wrap_table(blueprint)
def compile_drop_column(self, blueprint, command, connection):
columns = self.prefix_list(
'DROP COLUMN', self.wrap_list(command.columns))
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s %s' % (table, ', '.join(columns))
def compile_drop_primary(self, blueprint, command, _):
table = blueprint.get_table()
return 'ALTER TABLE %s DROP CONSTRAINT %s_pkey'\
% (self.wrap_table(blueprint), table)
def compile_drop_unique(self, blueprint, command, _):
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s DROP CONSTRAINT %s' % (table, command.index)
def compile_drop_index(self, blueprint, command, _):
return 'DROP INDEX %s' % command.index
def compile_drop_foreign(self, blueprint, command, _):
table = self.wrap_table(blueprint)
return 'ALTER TABLE %s DROP CONSTRAINT %s' % (table, command.index)
def compile_rename(self, blueprint, command, _):
from_ = self.wrap_table(blueprint)
return 'ALTER TABLE %s RENAME TO %s' % (
from_, self.wrap_table(command.to))
def _type_char(self, column):
return "CHAR(%s)" % column.length
def _type_string(self, column):
return "VARCHAR(%s)" % column.length
def _type_text(self, column):
return 'TEXT'
def _type_medium_text(self, column):
return 'TEXT'
def _type_long_text(self, column):
return 'TEXT'
def _type_integer(self, column):
return 'SERIAL' if column.auto_increment else 'INTEGER'
def _type_big_integer(self, column):
return 'BIGSERIAL' if column.auto_increment else 'BIGINT'
def _type_medium_integer(self, column):
return 'SERIAL' if column.auto_increment else 'INTEGER'
def _type_tiny_integer(self, column):
return 'SMALLSERIAL' if column.auto_increment else 'SMALLINT'
def _type_small_integer(self, column):
return 'SMALLSERIAL' if column.auto_increment else 'SMALLINT'
def _type_float(self, column):
return self._type_double(column)
def _type_double(self, column):
return 'DOUBLE PRECISION'
def _type_decimal(self, column):
return 'DECIMAL(%s, %s)' % (column.total, column.places)
def _type_boolean(self, column):
return 'BOOLEAN'
def _type_enum(self, column):
allowed = list(map(lambda a: "'%s'" % a, column.allowed))
return 'VARCHAR(255) CHECK ("%s" IN (%s))' % (
column.name, ', '.join(allowed))
def _type_json(self, column):
return 'JSON'
def _type_date(self, column):
return 'DATE'
def _type_datetime(self, column):
return 'TIMESTAMP(6) WITHOUT TIME ZONE'
def _type_time(self, column):
return 'TIME(6) WITHOUT TIME ZONE'
def _type_timestamp(self, column):
if column.use_current:
return ('TIMESTAMP(6) WITHOUT TIME ZONE '
'DEFAULT CURRENT_TIMESTAMP(6)')
return 'TIMESTAMP(6) WITHOUT TIME ZONE'
def _type_binary(self, column):
return 'BYTEA'
def _modify_nullable(self, blueprint, column):
if column.get('nullable'):
return ' NULL'
return ' NOT NULL'
def _modify_default(self, blueprint, column):
if column.get('default') is not None:
return ' DEFAULT %s' % self._get_default_value(column.default)
return ''
def _modify_increment(self, blueprint, column):
if column.type in self._serials and column.auto_increment:
return ' PRIMARY KEY'
return ''
def _get_dbal_column_type(self, type_):
"""
Get the dbal column type.
:param type_: The fluent type
:type type_: str
:rtype: str
"""
type_ = type_.lower()
if type_ == 'enum':
return 'string'
return super()._get_dbal_column_type(type_)
def _list_tables(self):
sql = """\
SELECT c.relname AS "table_name"
FROM pg_catalog.pg_class c
LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
WHERE c.relkind IN ('r','')
AND n.nspname <> 'pg_catalog'
AND n.nspname <> 'information_schema'
AND n.nspname !~ '^pg_toast'
AND pg_catalog.pg_table_is_visible(c.oid)
"""
return sql
def _list_columns(self, table_name):
sql = """\
SELECT column_name AS "name",
data_type AS "ttype",
COALESCE(numeric_precision, character_maximum_length) \
AS "precision",
is_nullable AS "nullable",
column_default AS "default"
FROM information_schema.columns
WHERE table_name = '{}'
""".format(table_name)
return sql
def _list_indexes(self, table_name):
sql = """\
SELECT indexname AS "name",
indexdef
FROM pg_indexes
WHERE tablename = '{}'
""".format(table_name)
return sql
def _show_index(self, index):
sql = """\
select
a.attname as column_name
from
pg_class t,
pg_class i,
pg_index ix,
pg_attribute a
where
t.oid = ix.indrelid
and i.oid = ix.indexrelid
and a.attrelid = t.oid
and a.attnum = ANY(ix.indkey)
and t.relkind = 'r'
and i.relname = '{}'
order by
t.relname,
i.relname;
""".format(index)
return sql
def _list_foreign_keys(self, table_name):
sql = """\
SELECT t2.oid::regclass::text AS "to_table",
a1.attname AS "column",
a2.attname AS "primary_key",
c.conname AS "name",
c.confupdtype AS "on_update",
c.confdeltype AS "on_delete"
FROM pg_constraint c
JOIN pg_class t1 ON c.conrelid = t1.oid
JOIN pg_class t2 ON c.confrelid = t2.oid
JOIN pg_attribute a1 ON a1.attnum = c.conkey[1]
AND a1.attrelid = t1.oid
JOIN pg_attribute a2 ON a2.attnum = c.confkey[1]
AND a2.attrelid = t2.oid
JOIN pg_namespace t3 ON c.connamespace = t3.oid
WHERE c.contype = 'f'
AND t1.relname = '{}'
ORDER BY c.conname
""".format(table_name)
return sql
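    # Illustration only, not part of orator: the statement shapes produced by
    # the format strings above for a hypothetical "users" table, assuming the
    # default double-quote identifier wrapping of the base grammar:
    #   compile_unique      -> ALTER TABLE "users" ADD CONSTRAINT users_email_unique UNIQUE ("email")
    #   compile_drop_column -> ALTER TABLE "users" DROP COLUMN "nickname"
    #   _type_decimal (total=8, places=2) -> DECIMAL(8, 2)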
| {
"content_hash": "3c04b56082c05ec09216a30236291fc0",
"timestamp": "",
"source": "github",
"line_count": 310,
"max_line_length": 78,
"avg_line_length": 30.838709677419356,
"alnum_prop": 0.5573221757322175,
"repo_name": "Hanaasagi/sorator",
"id": "da873fd969d26b48062d6330d499fa5b2c685ad6",
"size": "9585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "orator/schema/grammars/postgres_grammar.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "2023"
},
{
"name": "Python",
"bytes": "1070898"
}
],
"symlink_target": ""
} |
"""
Copyright 2017 Alexander Minyushkin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from gutenberg.acquire import load_etext
from gutenberg.cleanup import strip_headers
from functools import reduce
if __name__ == '__main__':
author = "Sheckley_Robert"
book_id = [33854, 9055, 29446, 29458, 29876, 32040, 29487, 29445, 32346, 29525, 51833, 32041, 50844, 51768, 20919, 51545, 29509, 29548, 29579 ]
author = "Best_books"
book_id = [1342,219,844,84,2542,5200,76,98,11,345,2701,2591,74,6130,1080,43,1400,174,158,1232,1661]
all_texts = reduce((lambda x, y: x + y), [ strip_headers(load_etext(id)).strip() for id in book_id])
f = open("../data/pg/" + author + ".txt", 'w')
f.write(all_texts)
f.close()
| {
"content_hash": "b555fe93fa882acdd225420680611cab",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 147,
"avg_line_length": 33.916666666666664,
"alnum_prop": 0.7125307125307125,
"repo_name": "Alexander-Minyushkin/aistreamer",
"id": "600272475a2d0bd50d3af91238f9e613bb19dd68",
"size": "1221",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "worker/text_from_gutenberg.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "728"
},
{
"name": "HTML",
"bytes": "5501"
},
{
"name": "Python",
"bytes": "60578"
},
{
"name": "Shell",
"bytes": "2289"
}
],
"symlink_target": ""
} |
from django.db.models import Q
from django.utils import timezone
from treebeard.mp_tree import MP_NodeQuerySet
from cms.publisher.query import PublisherQuerySet
from cms.exceptions import NoHomeFound
class PageQuerySet(PublisherQuerySet):
def on_site(self, site=None):
from cms.utils import get_current_site
if site is None:
site = get_current_site()
return self.filter(node__site=site)
def published(self, site=None, language=None):
now = timezone.now()
if language:
pub = self.on_site(site).filter(
Q(publication_date__lte=now) | Q(publication_date__isnull=True),
Q(publication_end_date__gt=now) | Q(publication_end_date__isnull=True),
title_set__published=True, title_set__language=language,
)
else:
pub = self.on_site(site).filter(
Q(publication_date__lte=now) | Q(publication_date__isnull=True),
Q(publication_end_date__gt=now) | Q(publication_end_date__isnull=True),
title_set__published=True,
)
return pub.exclude(title_set__publisher_state=4)
def get_home(self, site=None):
try:
home = self.published(site).distinct().get(is_home=True)
except self.model.DoesNotExist:
raise NoHomeFound('No Root page found. Publish at least one page!')
return home
def has_apphooks(self):
"""
Returns True if any page on this queryset has an apphook attached.
"""
return self.exclude(application_urls=None).exclude(application_urls='').exists()
class PageNodeQuerySet(MP_NodeQuerySet):
def get_descendants(self, parent=None):
if parent is None:
return self.all()
if parent.is_leaf():
# leaf nodes have no children
return self.none()
return self.filter(path__startswith=parent.path, depth__gte=parent.depth)
def delete_fast(self):
# calls django's delete instead of the one from treebeard
super(MP_NodeQuerySet, self).delete()
def root_only(self):
return self.filter(depth=1)
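# Illustrative usage sketch, not part of django CMS: assuming a Page manager
# built on PageQuerySet, the helpers above are typically chained like
#   Page.objects.published(site, language='en')
#   Page.objects.on_site(site).has_apphooks()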
| {
"content_hash": "404ef677b6858f47773bbebd736f25ed",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 88,
"avg_line_length": 33.61538461538461,
"alnum_prop": 0.6210526315789474,
"repo_name": "benzkji/django-cms",
"id": "fd5494ab00893b80f6c27b5e9489f877033aed35",
"size": "2209",
"binary": false,
"copies": "4",
"ref": "refs/heads/develop",
"path": "cms/models/query.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "132972"
},
{
"name": "HTML",
"bytes": "201324"
},
{
"name": "JavaScript",
"bytes": "1238070"
},
{
"name": "Python",
"bytes": "2356866"
},
{
"name": "Shell",
"bytes": "447"
}
],
"symlink_target": ""
} |
import torch # for torch.cat and torch.zeros
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
from nhwc.conv import Conv2d_NHWC
from nhwc.batch_norm import BatchNorm2d_NHWC
from nhwc.max_pool import MaxPool2d_NHWC
# Group batch norm
from apex.parallel import SyncBatchNorm as gbn
# Persistent group BN for NHWC case
from apex.contrib.groupbn.batch_norm import BatchNorm2d_NHWC as gbn_persistent
import apex.parallel
__all__ = ['resnet']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
class Layers_NCHW:
Conv2d = nn.Conv2d
MaxPool = nn.MaxPool2d
BnAddRelu = None # will be assigned at construction
def __init__(self, bn_group, **kwargs):
super(Layers_NCHW, self).__init__()
self.nhwc = False
self.bn_group = bn_group
if (bn_group > 1):
bn_base = gbn
else:
bn_base = nn.BatchNorm2d
class BnAddRelu_(bn_base):
def __init__(self, planes, fuse_relu=False, bn_group=1):
if (bn_group > 1):
super(BnAddRelu_, self).__init__(
planes,
process_group=apex.parallel.create_syncbn_process_group(bn_group))
else:
super(BnAddRelu_, self).__init__(planes)
self.fuse_relu_flag = fuse_relu
def forward(self, x, z=None):
out = super().forward(x)
if z is not None:
out = out.add_(z)
if self.fuse_relu_flag:
out = out.relu_()
return out
# this is still Layers_NCHW::__init__
self.BnAddRelu = BnAddRelu_
def build_bn(self, planes, fuse_relu=False):
return self.BnAddRelu(planes, fuse_relu, self.bn_group)
class Layers_NHWC:
Conv2d = Conv2d_NHWC
MaxPool = MaxPool2d_NHWC
class BnAddRelu(gbn_persistent):
def __init__(self, planes, fuse_relu=False, bn_group=1):
super(Layers_NHWC.BnAddRelu, self).__init__(planes,
fuse_relu,
bn_group=bn_group)
def __init__(self, bn_group, **kwargs):
super(Layers_NHWC, self).__init__()
self.nhwc = True
self.bn_group = bn_group
def build_bn(self, planes, fuse_relu):
return self.BnAddRelu(planes, fuse_relu, self.bn_group)
def conv1x1(layer_types, in_planes, out_planes, stride=1):
"""1x1 convolution"""
return layer_types.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
bias=False)
def conv3x3(layer_types, in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return layer_types.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, layerImpls, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(layerImpls, inplanes, planes, stride=stride)
self.bn1 = layerImpls.build_bn(planes, fuse_relu=True)
self.conv2 = conv3x3(layerImpls, planes, planes)
self.bn2 = layerImpls.build_bn(planes, fuse_relu=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
if self.downsample is not None:
residual = self.downsample(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.conv2(out)
out = self.bn2(out, residual)
return out
class ResNet(nn.Module):
def __init__(self, layerImpls, block, layers, num_classes=1000,
pad_input=False, ssd_mods=False, use_nhwc=False,
bn_group=1):
self.inplanes = 64
super(ResNet, self).__init__()
if pad_input:
input_channels = 4
else:
input_channels = 3
self.conv1 = layerImpls.Conv2d(input_channels, 64, kernel_size=7, stride=2,
padding=3, bias=False)
self.bn1 = layerImpls.build_bn(64, fuse_relu=True)
self.maxpool = layerImpls.MaxPool(kernel_size=3, stride=2, padding=1)
# Add conv{2,3,4}
self.layer1 = self._make_layer(layerImpls, block, 64, layers[0])
self.layer2 = self._make_layer(layerImpls, block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(layerImpls, block, 256, layers[2], stride=1)
        # FIXME! This (a) fails for nhwc, and (b) is irrelevant if the user is
        # also loading pretrained data (which we don't know about here, but do
        # know about in the caller, the "resnet34()" function below).
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, layerImpls, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
layerImpls.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
layerImpls.build_bn(planes * block.expansion, fuse_relu=False),
)
layers = []
layers.append(block(layerImpls, self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(layerImpls, self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.classifier(x)
return x
def _transpose_state(state, pad_input=False):
for k in state.keys():
if len(state[k].shape) == 4:
if pad_input and "conv1.weight" in k and not 'layer' in k:
s = state[k].shape
state[k] = torch.cat([state[k], torch.zeros([s[0], 1, s[2], s[3]])], dim=1)
state[k] = state[k].permute(0, 2, 3, 1).contiguous()
return state
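# Shape sketch (illustration only, not part of the original file): a conv
# weight stored as NCHW [out_ch, in_ch, kH, kW], e.g. [64, 3, 7, 7], is
# permuted above to NHWC [out_ch, kH, kW, in_ch] = [64, 7, 7, 3]; with
# pad_input=True the first conv's input channels are zero-padded 3 -> 4 before
# the permute, giving [64, 7, 7, 4].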
def resnet34(pretrained=False, nhwc=False, ssd_mods=False, **kwargs):
"""Constructs a ResNet model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
if nhwc:
layerImpls = Layers_NHWC(**kwargs)
else:
layerImpls = Layers_NCHW(**kwargs)
block = BasicBlock
layer_list = [3, 4, 6, 3]
model = ResNet(layerImpls, block, layer_list, ssd_mods=ssd_mods, use_nhwc=nhwc, **kwargs)
if pretrained:
orig_state_dict = model_zoo.load_url(model_urls['resnet34'])
# Modify the state dict to remove conv5 / layer4
state_dict = {k:orig_state_dict[k] for k in orig_state_dict if (not k.startswith('layer4') and not k.startswith('fc'))}
pad_input = kwargs.get('pad_input', False)
if nhwc:
state_dict = _transpose_state(state_dict, pad_input)
model.load_state_dict(state_dict)
return nn.Sequential(model.conv1, model.bn1, model.maxpool, model.layer1, model.layer2, model.layer3)
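# Hedged usage sketch (illustration only, not part of the original file):
#
#     backbone = resnet34(pretrained=False, nhwc=False, bn_group=1)
#     # features = backbone(images)  # images: NCHW float tensor, e.g. [N, 3, 300, 300]
#
# With pretrained=True the torchvision resnet34 weights are downloaded and the
# layer4/fc entries are stripped before loading, as implemented above.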
| {
"content_hash": "ca49e8cbdcc1e4dec919ae7bc7e5aec8",
"timestamp": "",
"source": "github",
"line_count": 220,
"max_line_length": 127,
"avg_line_length": 35.804545454545455,
"alnum_prop": 0.584105623968516,
"repo_name": "mlperf/training_results_v0.7",
"id": "0030d419034e826b4e41b2f0d3cc75e34cb3a57d",
"size": "8492",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Fujitsu/benchmarks/ssd/implementations/implementation_closed/resnet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "1731"
},
{
"name": "Awk",
"bytes": "14530"
},
{
"name": "Batchfile",
"bytes": "13130"
},
{
"name": "C",
"bytes": "172914"
},
{
"name": "C++",
"bytes": "13037795"
},
{
"name": "CMake",
"bytes": "113458"
},
{
"name": "CSS",
"bytes": "70255"
},
{
"name": "Clojure",
"bytes": "622652"
},
{
"name": "Cuda",
"bytes": "1974745"
},
{
"name": "Dockerfile",
"bytes": "149523"
},
{
"name": "Groovy",
"bytes": "160449"
},
{
"name": "HTML",
"bytes": "171537"
},
{
"name": "Java",
"bytes": "189275"
},
{
"name": "JavaScript",
"bytes": "98224"
},
{
"name": "Julia",
"bytes": "430755"
},
{
"name": "Jupyter Notebook",
"bytes": "11091342"
},
{
"name": "Lua",
"bytes": "17720"
},
{
"name": "MATLAB",
"bytes": "34903"
},
{
"name": "Makefile",
"bytes": "215967"
},
{
"name": "Perl",
"bytes": "1551186"
},
{
"name": "PowerShell",
"bytes": "13906"
},
{
"name": "Python",
"bytes": "36943114"
},
{
"name": "R",
"bytes": "134921"
},
{
"name": "Raku",
"bytes": "7280"
},
{
"name": "Ruby",
"bytes": "4930"
},
{
"name": "SWIG",
"bytes": "140111"
},
{
"name": "Scala",
"bytes": "1304960"
},
{
"name": "Shell",
"bytes": "1312832"
},
{
"name": "Smalltalk",
"bytes": "3497"
},
{
"name": "Starlark",
"bytes": "69877"
},
{
"name": "TypeScript",
"bytes": "243012"
}
],
"symlink_target": ""
} |
class TestRhsaCveNames(object):
def test_cve_names(self, monkeypatch, mock_post, mock_put, rhsa):
"""Verify that we have CVE names in our advisory (not None)"""
expected = 'CVE-2018-14649'
assert rhsa.cve_names == expected
| {
"content_hash": "55dbb0463335c3d80e49936445ab6024",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 70,
"avg_line_length": 42,
"alnum_prop": 0.6587301587301587,
"repo_name": "red-hat-storage/errata-tool",
"id": "00d942293f49a5168fca148099a4e095f83aa66d",
"size": "252",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "errata_tool/tests/test_rhsa_cve_list.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "723"
},
{
"name": "Python",
"bytes": "167318"
},
{
"name": "Shell",
"bytes": "663"
}
],
"symlink_target": ""
} |
from datetime import timedelta as td
import time
from unittest.mock import patch
from django.core import signing
from django.utils.timezone import now
from hc.test import BaseTestCase
class UnsubscribeReportsTestCase(BaseTestCase):
def test_it_unsubscribes(self):
self.profile.next_report_date = now()
self.profile.nag_period = td(hours=1)
self.profile.next_nag_date = now()
self.profile.save()
sig = signing.TimestampSigner(salt="reports").sign("alice")
url = "/accounts/unsubscribe_reports/%s/" % sig
r = self.client.post(url)
self.assertContains(r, "Unsubscribed")
self.profile.refresh_from_db()
self.assertEqual(self.profile.reports, "off")
self.assertIsNone(self.profile.next_report_date)
self.assertEqual(self.profile.nag_period.total_seconds(), 0)
self.assertIsNone(self.profile.next_nag_date)
def test_bad_signature_gets_rejected(self):
url = "/accounts/unsubscribe_reports/invalid/"
r = self.client.get(url)
self.assertContains(r, "Incorrect Link")
def test_it_serves_confirmation_form(self):
sig = signing.TimestampSigner(salt="reports").sign("alice")
url = "/accounts/unsubscribe_reports/%s/" % sig
r = self.client.get(url)
self.assertContains(r, "Please press the button below")
self.assertNotContains(r, "submit()")
def test_aged_signature_autosubmits(self):
with patch("django.core.signing.time") as mock_time:
mock_time.time.return_value = time.time() - 301
signer = signing.TimestampSigner(salt="reports")
sig = signer.sign("alice")
url = "/accounts/unsubscribe_reports/%s/" % sig
r = self.client.get(url)
self.assertContains(r, "Please press the button below")
self.assertContains(r, "submit()")
def test_it_handles_missing_user(self):
self.alice.delete()
sig = signing.TimestampSigner(salt="reports").sign("alice")
url = "/accounts/unsubscribe_reports/%s/" % sig
r = self.client.post(url)
self.assertContains(r, "Unsubscribed")
| {
"content_hash": "093d4939a975be7119d0f62be8416364",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 68,
"avg_line_length": 34.935483870967744,
"alnum_prop": 0.6523545706371191,
"repo_name": "iphoting/healthchecks",
"id": "f4cdb2fec6f4b1bd3b74ba91a0681163e2ceec95",
"size": "2166",
"binary": false,
"copies": "1",
"ref": "refs/heads/heroku",
"path": "hc/accounts/tests/test_unsubscribe_reports.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "64145"
},
{
"name": "Dockerfile",
"bytes": "939"
},
{
"name": "HTML",
"bytes": "595497"
},
{
"name": "JavaScript",
"bytes": "55883"
},
{
"name": "Less",
"bytes": "14135"
},
{
"name": "Python",
"bytes": "894208"
},
{
"name": "Shell",
"bytes": "4382"
}
],
"symlink_target": ""
} |
"""
========
Dispatch
========
Identical to django.dispatch module but adds few more features
"""
import django.dispatch
from celery import shared_task
def async_receiver(signal, sender=None, **kwargs):
"""
    Decorator that runs a Django signal receiver asynchronously using Celery. The decorated
    function is registered as a Celery task; the Django signal mechanism keeps working as
    usual and no additional changes are required for built-in or custom signals.
"""
def _decorator(func):
# Convert normal function to celery task
func_celery = shared_task(func, **kwargs)
# Connect to a signal
if isinstance(signal, (list, tuple)):
for s in signal:
                # Weak is false as func_celery doesn't exist outside the closure scope, so it
                # cannot be referenced weakly and would be erased by the garbage collector
s.connect(func_celery.delay, sender=sender)
else:
signal.connect(func_celery.delay, sender=sender)
        # To let Celery recognize the decorated function as a Celery task
return func_celery
return _decorator
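# Hedged usage sketch (illustration only; Article and notify_author are
# hypothetical names, not part of this project):
#
#     from django.db.models.signals import post_save
#
#     @async_receiver(post_save, sender=Article)
#     def notify_author(sender, instance, **kwargs):
#         ...  # runs as a Celery task instead of blocking the caller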
def reducer(self):
return django.dispatch.Signal, (self.providing_args,)
#: Monkey patched Signal class to remove non-pickleable attribute
django.dispatch.Signal.__reduce__ = reducer
class Signal(django.dispatch.Signal):
"""
Base class for all custom signal.
    The standard django Signal class is overridden so that the sender can be accepted at
    construction time and passed to ``Signal.send()`` implicitly.
"""
def __init__(self, providing_args=None, use_caching=False, sender=None):
self.sender = sender
super(Signal, self).__init__(providing_args=providing_args, use_caching=use_caching)
def send(self, **named):
super(Signal, self).send(sender=self.sender, **named)
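# Hedged usage sketch (illustration only, not part of the original file):
#
#     order_shipped = Signal(providing_args=["order_id"], sender="orders")
#     order_shipped.send(order_id=42)   # sender="orders" is supplied implicitly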
| {
"content_hash": "c27cc02866809e47c4c9fe3e387504fe",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 98,
"avg_line_length": 32.44827586206897,
"alnum_prop": 0.6785334750265675,
"repo_name": "inabhi9/drf-ext",
"id": "9220615708b564058e75a55f667e5a264204e25a",
"size": "1882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "drf_ext/core/dispatch.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41247"
}
],
"symlink_target": ""
} |
import numpy as np
from collections import namedtuple
from pysc2.agents import base_agent
from pysc2.lib import actions
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.autograd as autograd
from torch.autograd import Variable
from torch.utils.data import DataLoader, Dataset
from starcraft_agents.a2c_model import A2CModel
from starcraft_agents.learning_agent import LearningAgent
from starcraft_agents.saved_actions import TrajectoryDataset
from torchnet.logger import VisdomPlotLogger, VisdomLogger
import torchnet as tnt
class A2CAgent(LearningAgent):
"""The start of a basic A2C agent for learning agents."""
def __init__(self, screen_width, screen_height, horizon,
num_processes=2,
fully_conv=False,
expirement_name="default_expirement",
learning_rate=7e-4,
value_coef=1.0,
entropy_coef=1e-4,
in_channels=8,
continue_training=False,
summary=None):
super(A2CAgent, self).__init__(expirement_name)
num_functions = len(actions.FUNCTIONS)
self.model = A2CModel(num_functions=num_functions,
expirement_name=expirement_name,
screen_width=screen_width,
screen_height=screen_height).cuda()
self.screen_width = screen_width
self.screen_height = screen_height
self.summary = summary
self.in_channels = in_channels
self.horizon = horizon
self.num_processes = num_processes
self.max_grad = 0.5
self.entropy_coef = entropy_coef
self.value_coef = value_coef
self.gamma = 0.95
self.tau = 0.97
self.saved_actions = TrajectoryDataset(self.horizon,
self.num_processes,
screen_width,
screen_height)
if continue_training:
self.model.load_state_dict(torch.load(f"./models/{expirement_name}.pth"))
self.model.eval()
print(f"learning rate set to: {learning_rate}")
self.optimizer = optim.Adam(self.model.parameters(),
lr=learning_rate)
self.final_rewards = torch.zeros(1, 1)
self.setup_loggers()
def setup_loggers(self):
# visdom setup
self.loss_meter = tnt.meter.AverageValueMeter()
self.loss_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Train Loss'})
self.pi_loss_meter = tnt.meter.AverageValueMeter()
self.pi_loss_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Policy Loss'})
self.xy_loss_meter = tnt.meter.AverageValueMeter()
self.xy_loss_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'XY Loss'})
self.value_loss_meter = tnt.meter.AverageValueMeter()
self.value_loss_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Value Loss'})
self.reward_meter = tnt.meter.AverageValueMeter()
self.reward_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Batch Reward'})
self.entropy_meter = tnt.meter.AverageValueMeter()
self.entropy_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Entropy'})
self.adv_meter = tnt.meter.AverageValueMeter()
self.adv_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': 'Advantage'})
self.episode_logger = VisdomPlotLogger('line',
env=self.expirement_name,
opts={'title': "Episode Score"})
self.episode_meter = tnt.meter.MovingAverageValueMeter(windowsize=3)
def finish_step(self):
self.saved_actions.step()
def reset_meters(self):
self.adv_meter.reset()
self.loss_meter.reset()
self.pi_loss_meter.reset()
self.value_loss_meter.reset()
self.entropy_meter.reset()
self.xy_loss_meter.reset()
def rollout(self):
self.reset_meters()
self.saved_actions.compute_advantages(self.gamma)
loader = DataLoader(self.saved_actions, batch_size=self.horizon, shuffle=True)
for screens, minimaps, games, actions, x1s, y1s, rewards, returns in loader:
values, lp, x_lp, y_lp, dist_entropy, spatial_entropy = self.model.evaluate_actions(
Variable(screens).cuda(),
Variable(minimaps).cuda(),
Variable(games).cuda(),
Variable(actions).cuda(),
Variable(x1s).cuda(),
Variable(y1s).cuda())
rewards_var = Variable(rewards).cuda()
returns_var = Variable(returns).cuda()
advs = (returns_var - values).data
advs_var = Variable(advs).cuda()
dist_entropy *= self.entropy_coef
spatial_entropy *= self.entropy_coef
pg_loss = ((lp + x_lp + y_lp) * advs_var).mean()
pg_loss -= dist_entropy
pg_loss -= spatial_entropy
vf_loss = (values - rewards_var).pow(2).mean() * self.value_coef
train_loss = pg_loss + vf_loss
            self.optimizer.zero_grad()
            train_loss.backward()
            nn.utils.clip_grad_norm(self.model.parameters(), self.max_grad)
            self.optimizer.step()
self.loss_meter.add(train_loss.data[0])
self.pi_loss_meter.add(pg_loss.data[0])
self.entropy_meter.add(dist_entropy.data[0] + spatial_entropy.data[0])
self.value_loss_meter.add(vf_loss.data[0])
self.reward_meter.add(rewards.sum())
self.adv_meter.add(advs.mean())
self.loss_logger.log(self.steps, self.loss_meter.value()[0])
self.pi_loss_logger.log(self.steps, self.pi_loss_meter.value()[0])
self.reward_logger.log(self.steps, self.reward_meter.value()[0])
self.entropy_logger.log(self.steps, self.entropy_meter.value()[0])
self.value_loss_logger.log(self.steps, self.value_loss_meter.value()[0])
self.adv_logger.log(self.steps, self.adv_meter.value()[0])
self.episode_logger.log(self.steps, self.episode_meter.value()[0])
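        # Loss sketch (illustration only, mirroring the rollout above):
        #
        #     pg_loss = mean((log_pi_action + log_pi_x + log_pi_y) * advantage)
        #               - entropy_coef * (action_entropy + spatial_entropy)
        #     vf_loss = value_coef * mean((V(s) - reward)^2)
        #     train_loss = pg_loss + vf_loss
        #
        # where advantage = return - V(s) is detached and treated as a constant.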
| {
"content_hash": "5edf4b83f20f0533709c24b7f12b0085",
"timestamp": "",
"source": "github",
"line_count": 166,
"max_line_length": 96,
"avg_line_length": 44.25301204819277,
"alnum_prop": 0.5294037571467465,
"repo_name": "ShawnSpooner/starcraft_agents",
"id": "33da285498072eb0cb721d1cbdf2f66dbfce114a",
"size": "7346",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "starcraft_agents/a2c_agent.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "26860"
},
{
"name": "Shell",
"bytes": "188"
}
],
"symlink_target": ""
} |
__author__ = 'alicia.williams'
#Week Three: Pig Latin
#CIS-125 FA 2015
# File: PigLatinAssignment.py
# This program takes English words and translates them to Pig Latin.
def main():
print("This program translates an English word to Pig Latin. \n")
#Prompting the user to enter an English word to translate.
    #To compensate for case sensitivity, I created another variable by applying the
    #lower string method to the eng variable so that the
    #output is all lowercase.
eng = input("Please enter an Engilsh word to translate: ")
pig = eng.lower()
vowel = "aeiouAEIOU"
#Translate the word into Pig Latin.
#Printing the translated word.
if pig[0] in vowel:
print(pig + "yay")
else:
print(pig[1:] + pig[0] + "ay")
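    # Example runs (illustration): "apple" starts with a vowel and prints
    # "appleyay", while "python" prints "ythonpay" (first letter moved to the
    # end followed by "ay").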
main()
| {
"content_hash": "be72406e20a31a2bab78c3b96e90f924",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 80,
"avg_line_length": 29.925925925925927,
"alnum_prop": 0.6683168316831684,
"repo_name": "ajanaew24/Week-Three-Assignment",
"id": "18bf0b6f0e386a0ad1dc8e4c4a7a83e81dac5c31",
"size": "808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PigLatinAssignment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1822"
}
],
"symlink_target": ""
} |
"""docstring"""
| {
"content_hash": "30c29c2885c71b49b5a9fe1f407e4ba9",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 15,
"avg_line_length": 16,
"alnum_prop": 0.5625,
"repo_name": "Titulacion-Sistemas/PythonTitulacion-EV",
"id": "fb1e2b64861dd3af1f1fa4cad9050569dcc75c11",
"size": "16",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "Lib/site-packages/pylint/test/input/func_reqattrs.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ASP",
"bytes": "2117"
},
{
"name": "C",
"bytes": "469338"
},
{
"name": "C++",
"bytes": "93276"
},
{
"name": "CSS",
"bytes": "173812"
},
{
"name": "JavaScript",
"bytes": "203291"
},
{
"name": "PowerShell",
"bytes": "8104"
},
{
"name": "Python",
"bytes": "17198855"
},
{
"name": "Shell",
"bytes": "2237"
},
{
"name": "TeX",
"bytes": "1527"
},
{
"name": "Visual Basic",
"bytes": "904"
},
{
"name": "XSLT",
"bytes": "154751"
}
],
"symlink_target": ""
} |
"""
This example shows how to use the FedEx RateRequest service.
The variables populated below represent the minimum required values.
You will need to fill all of these, or risk seeing a SchemaValidationError
exception thrown by suds.
TIP: Near the bottom of the module, see how to check if the destination
is Out of Delivery Area (ODA).
"""
import logging
from example_config import CONFIG_OBJ
from fedex.services.rate_service import FedexRateServiceRequest
# Set this to the INFO level to see the response from Fedex printed in stdout.
logging.basicConfig(level=logging.INFO)
# This is the object that will be handling our tracking request.
# We're using the FedexConfig object from example_config.py in this dir.
rate_request = FedexRateServiceRequest(CONFIG_OBJ)
rate_request.RequestedShipment.ServiceType = 'FEDEX_FREIGHT_ECONOMY'
rate_request.RequestedShipment.DropoffType = 'REGULAR_PICKUP'
rate_request.RequestedShipment.PackagingType = 'YOUR_PACKAGING'
rate_request.RequestedShipment.FreightShipmentDetail.TotalHandlingUnits = 1
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightAccountNumber = CONFIG_OBJ.freight_account_number
rate_request.RequestedShipment.Shipper.Address.PostalCode = '72601'
rate_request.RequestedShipment.Shipper.Address.CountryCode = 'US'
rate_request.RequestedShipment.Shipper.Address.City = 'Harrison'
rate_request.RequestedShipment.Shipper.Address.StateOrProvinceCode = 'AR'
rate_request.RequestedShipment.Shipper.Address.Residential = False
rate_request.RequestedShipment.Recipient.Address.PostalCode = '72601'
rate_request.RequestedShipment.Recipient.Address.CountryCode = 'US'
rate_request.RequestedShipment.Recipient.Address.StateOrProvinceCode = 'AR'
rate_request.RequestedShipment.Recipient.Address.City = 'Harrison'
# include estimated duties and taxes in rate quote, can be ALL or NONE
rate_request.RequestedShipment.EdtRequestType = 'NONE'
# note: in order for this to work in test, you may need to use the
# specially provided LTL addresses emailed to you when signing up.
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.PersonName = 'Sender Name'
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.CompanyName = 'Some Company'
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Contact.PhoneNumber = '9012638716'
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.StreetLines = ['2000 Freight LTL Testing']
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.City = 'Harrison'
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.StateOrProvinceCode = 'AR'
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.PostalCode = '72601'
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.CountryCode = 'US'
rate_request.RequestedShipment.FreightShipmentDetail.FedExFreightBillingContactAndAddress.Address.Residential = False
spec = rate_request.create_wsdl_object_of_type('ShippingDocumentSpecification')
spec.ShippingDocumentTypes = [spec.CertificateOfOrigin]
rate_request.RequestedShipment.ShippingDocumentSpecification = spec
role = rate_request.create_wsdl_object_of_type('FreightShipmentRoleType')
rate_request.RequestedShipment.FreightShipmentDetail.Role = role.SHIPPER
# Designates the terms of the "collect" payment for a Freight
# Shipment. Can be NON_RECOURSE_SHIPPER_SIGNED or STANDARD
rate_request.RequestedShipment.FreightShipmentDetail.CollectTermsType = 'STANDARD'
package1_weight = rate_request.create_wsdl_object_of_type('Weight')
package1_weight.Value = 500.0
package1_weight.Units = "LB"
rate_request.RequestedShipment.FreightShipmentDetail.PalletWeight = package1_weight
package1 = rate_request.create_wsdl_object_of_type('FreightShipmentLineItem')
package1.Weight = package1_weight
package1.Packaging = 'PALLET'
package1.Description = 'Products'
package1.FreightClass = 'CLASS_500'
rate_request.RequestedShipment.FreightShipmentDetail.LineItems = package1
# If you'd like to see some documentation on the ship service WSDL, un-comment
# this line. (Spammy).
#print rate_request.client
# Un-comment this to see your complete, ready-to-send request as it stands
# before it is actually sent. This is useful for seeing what values you can
# change.
#print rate_request.RequestedShipment
# Fires off the request, sets the 'response' attribute on the object.
rate_request.send_request()
# This will show the reply to your rate_request being sent. You can access the
# attributes through the response attribute on the request object. This is
# good to un-comment to see the variables returned by the FedEx reply.
#print rate_request.response
# Here is the overall end result of the query.
print "HighestSeverity:", rate_request.response.HighestSeverity
# RateReplyDetails can contain rates for multiple ServiceTypes if ServiceType was set to None
for service in rate_request.response.RateReplyDetails:
for detail in service.RatedShipmentDetails:
for surcharge in detail.ShipmentRateDetail.Surcharges:
if surcharge.SurchargeType == 'OUT_OF_DELIVERY_AREA':
print "%s: ODA rate_request charge %s" % (service.ServiceType, surcharge.Amount.Amount)
for rate_detail in service.RatedShipmentDetails:
print "%s: Net FedEx Charge %s %s" % (service.ServiceType, rate_detail.ShipmentRateDetail.TotalNetFedExCharge.Currency,
rate_detail.ShipmentRateDetail.TotalNetFedExCharge.Amount)
| {
"content_hash": "f65e376a1b63f0ce269bdc5d40d603d8",
"timestamp": "",
"source": "github",
"line_count": 113,
"max_line_length": 140,
"avg_line_length": 50.54867256637168,
"alnum_prop": 0.820203081232493,
"repo_name": "AxiaCore/python-fedex",
"id": "0fe7d9746a64a16e99b8b7065bab5ced87d0fa45",
"size": "5734",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "examples/freight_rate_request.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "87999"
},
{
"name": "Shell",
"bytes": "45"
}
],
"symlink_target": ""
} |
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Queen'
copyright = u'2013, Kashif Malik'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Queendoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Queen.tex', u'Queen Documentation',
u'Kashif Malik', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'queen', u'Queen Documentation',
[u'Kashif Malik'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Queen', u'Queen Documentation',
u'Kashif Malik', 'Queen', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| {
"content_hash": "2b286b69b28b68356a8dbc9b566c3c36",
"timestamp": "",
"source": "github",
"line_count": 229,
"max_line_length": 80,
"avg_line_length": 31.903930131004365,
"alnum_prop": 0.7003832466465918,
"repo_name": "kalail/queen",
"id": "87d0a4036520b91f101d5ebe31e8c4d2d99069e8",
"size": "7722",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27344"
},
{
"name": "Shell",
"bytes": "5094"
}
],
"symlink_target": ""
} |
import unittest
from conans import tools
from conans.test.utils.tools import TestClient, TestServer
from conans.model.ref import ConanFileReference, PackageReference
import os
from conans.paths import EXPORT_SOURCES_TGZ_NAME, EXPORT_TGZ_NAME, EXPORT_SRC_FOLDER
from parameterized.parameterized import parameterized
from conans.util.files import load, save, md5sum
from conans.model.manifest import FileTreeManifest
from collections import OrderedDict
from conans.test.utils.test_files import scan_folder
conanfile_py = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports = "*.h", "*.cpp"
def package(self):
self.copy("*.h", "include")
"""
combined_conanfile = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "*.h", "*.cpp"
exports = "*.txt"
def package(self):
self.copy("*.h", "include")
self.copy("data.txt", "docs")
"""
nested_conanfile = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "src/*.h", "src/*.cpp"
exports = "src/*.txt"
def package(self):
self.copy("*.h", "include")
self.copy("*data.txt", "docs")
"""
overlap_conanfile = """
from conans import ConanFile
class HelloConan(ConanFile):
name = "Hello"
version = "0.1"
exports_sources = "src/*.h", "*.txt"
exports = "src/*.txt", "*.h"
def package(self):
self.copy("*.h", "include")
self.copy("*data.txt", "docs")
"""
class ExportsSourcesTest(unittest.TestCase):
def setUp(self):
self.server = TestServer()
self.other_server = TestServer()
servers = OrderedDict([("default", self.server),
("other", self.other_server)])
client = TestClient(servers=servers, users={"default": [("lasote", "mypass")],
"other": [("lasote", "mypass")]})
self.client = client
self.reference = ConanFileReference.loads("Hello/0.1@lasote/testing")
self.package_reference = PackageReference(self.reference,
"5ab84d6acfe1f23c4fae0ab88f26e3a396351ac9")
self.source_folder = self.client.client_cache.source(self.reference)
self.package_folder = self.client.client_cache.package(self.package_reference)
self.export_folder = self.client.client_cache.export(self.reference)
self.export_sources_folder = self.client.client_cache.export_sources(self.reference)
def _check_source_folder(self, mode):
""" Source folder MUST be always the same
"""
expected_sources = ["hello.h"]
if mode == "both":
expected_sources.append("data.txt")
if mode == "nested" or mode == "overlap":
expected_sources = ["src/hello.h", "src/data.txt"]
expected_sources = sorted(expected_sources)
self.assertEqual(scan_folder(self.source_folder), expected_sources)
def _check_package_folder(self, mode):
""" Package folder must be always the same (might have tgz after upload)
"""
if mode in ["exports", "exports_sources"]:
expected_package = ["conaninfo.txt", "conanmanifest.txt", "include/hello.h"]
if mode == "both":
expected_package = ["conaninfo.txt", "conanmanifest.txt", "include/hello.h",
"docs/data.txt"]
if mode == "nested" or mode == "overlap":
expected_package = ["conaninfo.txt", "conanmanifest.txt", "include/src/hello.h",
"docs/src/data.txt"]
self.assertEqual(scan_folder(self.package_folder), sorted(expected_package))
def _check_server_folder(self, mode, server=None):
if mode == "exports_sources":
expected_server = [EXPORT_SOURCES_TGZ_NAME, 'conanfile.py', 'conanmanifest.txt']
if mode == "exports":
expected_server = [EXPORT_TGZ_NAME, 'conanfile.py', 'conanmanifest.txt']
if mode == "both" or mode == "nested" or mode == "overlap":
expected_server = [EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME, 'conanfile.py',
'conanmanifest.txt']
server = server or self.server
self.assertEqual(scan_folder(server.paths.export(self.reference)), expected_server)
def _check_export_folder(self, mode, export_folder=None, export_src_folder=None):
if mode == "exports_sources":
expected_src_exports = ["hello.h"]
expected_exports = ['conanfile.py', 'conanmanifest.txt']
if mode == "exports":
expected_src_exports = []
expected_exports = ["hello.h", 'conanfile.py', 'conanmanifest.txt']
if mode == "both":
expected_src_exports = ["hello.h"]
expected_exports = ['conanfile.py', 'conanmanifest.txt', "data.txt"]
if mode == "nested":
expected_src_exports = ["src/hello.h"]
expected_exports = ["src/data.txt", 'conanfile.py', 'conanmanifest.txt']
if mode == "overlap":
expected_src_exports = ["src/hello.h", "src/data.txt"]
expected_exports = ["src/data.txt", "src/hello.h", 'conanfile.py', 'conanmanifest.txt']
self.assertEqual(scan_folder(export_folder or self.export_folder),
sorted(expected_exports))
self.assertEqual(scan_folder(export_src_folder or self.export_sources_folder),
sorted(expected_src_exports))
def _check_export_installed_folder(self, mode, reuploaded=False, updated=False):
""" Just installed, no EXPORT_SOURCES_DIR is present
"""
if mode == "exports_sources":
expected_exports = ['conanfile.py', 'conanmanifest.txt']
if mode == "both":
expected_exports = ['conanfile.py', 'conanmanifest.txt', "data.txt"]
if reuploaded:
expected_exports.append("conan_export.tgz")
if mode == "exports":
expected_exports = ['conanfile.py', 'conanmanifest.txt', "hello.h"]
if reuploaded:
expected_exports.append("conan_export.tgz")
if mode == "nested":
expected_exports = ['conanfile.py', 'conanmanifest.txt', "src/data.txt"]
if reuploaded:
expected_exports.append("conan_export.tgz")
if mode == "overlap":
expected_exports = ['conanfile.py', 'conanmanifest.txt', "src/data.txt", "src/hello.h"]
if reuploaded:
expected_exports.append("conan_export.tgz")
if updated:
expected_exports.append("license.txt")
self.assertEqual(scan_folder(self.export_folder), sorted(expected_exports))
self.assertFalse(os.path.exists(self.export_sources_folder))
def _check_export_uploaded_folder(self, mode, export_folder=None, export_src_folder=None):
if mode == "exports_sources":
expected_src_exports = ["hello.h"]
expected_exports = ['conanfile.py', 'conanmanifest.txt', EXPORT_SOURCES_TGZ_NAME]
if mode == "exports":
expected_src_exports = []
expected_exports = ["hello.h", 'conanfile.py', 'conanmanifest.txt', EXPORT_TGZ_NAME]
if mode == "both":
expected_src_exports = ["hello.h"]
expected_exports = ['conanfile.py', 'conanmanifest.txt', "data.txt",
EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME]
if mode == "nested":
expected_src_exports = ["src/hello.h"]
expected_exports = ["src/data.txt", 'conanfile.py', 'conanmanifest.txt',
EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME]
if mode == "overlap":
expected_src_exports = ["src/hello.h", "src/data.txt"]
expected_exports = ["src/data.txt", "src/hello.h", 'conanfile.py', 'conanmanifest.txt',
EXPORT_TGZ_NAME, EXPORT_SOURCES_TGZ_NAME]
export_folder = export_folder or self.export_folder
self.assertEqual(scan_folder(export_folder), sorted(expected_exports))
self.assertEqual(scan_folder(export_src_folder or self.export_sources_folder),
sorted(expected_src_exports))
def _check_manifest(self, mode):
manifest = load(os.path.join(self.client.current_folder,
".conan_manifests/Hello/0.1/lasote/testing/export/"
"conanmanifest.txt"))
if mode == "exports_sources":
self.assertIn("%s/hello.h: 5d41402abc4b2a76b9719d911017c592" % EXPORT_SRC_FOLDER,
manifest.splitlines())
elif mode == "exports":
self.assertIn("hello.h: 5d41402abc4b2a76b9719d911017c592",
manifest.splitlines())
elif mode == "both":
self.assertIn("data.txt: 8d777f385d3dfec8815d20f7496026dc", manifest.splitlines())
self.assertIn("%s/hello.h: 5d41402abc4b2a76b9719d911017c592" % EXPORT_SRC_FOLDER,
manifest.splitlines())
elif mode == "nested":
self.assertIn("src/data.txt: 8d777f385d3dfec8815d20f7496026dc",
manifest.splitlines())
self.assertIn("%s/src/hello.h: 5d41402abc4b2a76b9719d911017c592" % EXPORT_SRC_FOLDER,
manifest.splitlines())
else:
assert mode == "overlap"
self.assertIn("src/data.txt: 8d777f385d3dfec8815d20f7496026dc",
manifest.splitlines())
self.assertIn("src/hello.h: 5d41402abc4b2a76b9719d911017c592",
manifest.splitlines())
self.assertIn("%s/src/hello.h: 5d41402abc4b2a76b9719d911017c592" % EXPORT_SRC_FOLDER,
manifest.splitlines())
self.assertIn("%s/src/data.txt: 8d777f385d3dfec8815d20f7496026dc" % EXPORT_SRC_FOLDER,
manifest.splitlines())
def _create_code(self, mode):
if mode == "exports":
conanfile = conanfile_py
elif mode == "exports_sources":
conanfile = conanfile_py.replace("exports", "exports_sources")
elif mode == "both":
conanfile = combined_conanfile
elif mode == "nested":
conanfile = nested_conanfile
elif mode == "overlap":
conanfile = overlap_conanfile
if mode in ["nested", "overlap"]:
self.client.save({"conanfile.py": conanfile,
"src/hello.h": "hello",
"src/data.txt": "data"})
else:
self.client.save({"conanfile.py": conanfile,
"hello.h": "hello",
"data.txt": "data"})
@parameterized.expand([("exports", ), ("exports_sources", ), ("both", ), ("nested", ),
("overlap", )])
def copy_test(self, mode):
# https://github.com/conan-io/conan/issues/943
self._create_code(mode)
self.client.run("export . lasote/testing")
self.client.run("install Hello/0.1@lasote/testing --build=missing")
self.client.run("upload Hello/0.1@lasote/testing --all")
self.client.run('remove Hello/0.1@lasote/testing -f')
self.client.run("install Hello/0.1@lasote/testing")
# new copied package data
reference = ConanFileReference.loads("Hello/0.1@lasote/stable")
source_folder = self.client.client_cache.source(reference)
export_folder = self.client.client_cache.export(reference)
self.client.run("copy Hello/0.1@lasote/testing lasote/stable")
self._check_export_folder(mode, export_folder)
self.client.run("upload Hello/0.1@lasote/stable")
self.assertFalse(os.path.exists(source_folder))
self._check_export_uploaded_folder(mode, export_folder)
self._check_server_folder(mode)
@parameterized.expand([("exports", ), ("exports_sources", ), ("both", ), ("nested", ),
("overlap", )])
def export_test(self, mode):
self._create_code(mode)
self.client.run("export . lasote/testing")
self._check_export_folder(mode)
# now build package
self.client.run("install Hello/0.1@lasote/testing --build=missing")
        # Source folder and package should be exactly the same
self._check_export_folder(mode)
self._check_source_folder(mode)
self._check_package_folder(mode)
# upload to remote
self.client.run("upload Hello/0.1@lasote/testing --all")
self._check_export_uploaded_folder(mode)
self._check_server_folder(mode)
# remove local
self.client.run('remove Hello/0.1@lasote/testing -f')
self.assertFalse(os.path.exists(self.export_folder))
# install from remote
self.client.run("install Hello/0.1@lasote/testing")
self.assertFalse(os.path.exists(self.source_folder))
self._check_export_installed_folder(mode)
self._check_package_folder(mode)
# Manifests must work too!
self.client.run("install Hello/0.1@lasote/testing --manifests")
self.assertFalse(os.path.exists(self.source_folder))
# The manifests retrieve the normal state, as it retrieves sources
self._check_export_folder(mode)
self._check_package_folder(mode)
self._check_manifest(mode)
# lets try to verify
self.client.run('remove Hello/0.1@lasote/testing -f')
self.assertFalse(os.path.exists(self.export_folder))
self.client.run("install Hello/0.1@lasote/testing --verify")
self.assertFalse(os.path.exists(self.source_folder))
# The manifests retrieve the normal state, as it retrieves sources
self._check_export_folder(mode)
self._check_package_folder(mode)
self._check_manifest(mode)
@parameterized.expand([("exports", ), ("exports_sources", ), ("both", ), ("nested", ),
("overlap", )])
def export_upload_test(self, mode):
self._create_code(mode)
self.client.run("export . lasote/testing")
self.client.run("upload Hello/0.1@lasote/testing")
self.assertFalse(os.path.exists(self.source_folder))
self._check_export_uploaded_folder(mode)
self._check_server_folder(mode)
# remove local
self.client.run('remove Hello/0.1@lasote/testing -f')
self.assertFalse(os.path.exists(self.export_folder))
# install from remote
self.client.run("install Hello/0.1@lasote/testing --build")
self._check_export_folder(mode)
self._check_source_folder(mode)
self._check_package_folder(mode)
# Manifests must work too!
self.client.run("install Hello/0.1@lasote/testing --manifests")
# The manifests retrieve the normal state, as it retrieves sources
self._check_export_folder(mode)
self._check_package_folder(mode)
self._check_manifest(mode)
@parameterized.expand([("exports", ), ("exports_sources", ), ("both", ), ("nested", ),
("overlap", )])
def reupload_test(self, mode):
""" try to reupload to same and other remote
"""
self._create_code(mode)
self.client.run("export . lasote/testing")
self.client.run("install Hello/0.1@lasote/testing --build=missing")
self.client.run("upload Hello/0.1@lasote/testing --all")
self.client.run('remove Hello/0.1@lasote/testing -f')
self.client.run("install Hello/0.1@lasote/testing")
# upload to remote again, the folder remains as installed
self.client.run("upload Hello/0.1@lasote/testing --all")
self._check_export_installed_folder(mode, reuploaded=True)
self._check_server_folder(mode)
self.client.run("upload Hello/0.1@lasote/testing --all -r=other")
self._check_export_uploaded_folder(mode)
self._check_server_folder(mode, self.other_server)
@parameterized.expand([("exports", ), ("exports_sources", ), ("both", ), ("nested", ),
("overlap", )])
def update_test(self, mode):
self._create_code(mode)
self.client.run("export . lasote/testing")
self.client.run("install Hello/0.1@lasote/testing --build=missing")
self.client.run("upload Hello/0.1@lasote/testing --all")
self.client.run('remove Hello/0.1@lasote/testing -f')
self.client.run("install Hello/0.1@lasote/testing")
# upload to remote again, the folder remains as installed
self.client.run("install Hello/0.1@lasote/testing --update")
self.assertIn("Hello/0.1@lasote/testing: Already installed!", self.client.user_io.out)
self._check_export_installed_folder(mode)
server_path = self.server.paths.export(self.reference)
save(os.path.join(server_path, "license.txt"), "mylicense")
manifest = FileTreeManifest.load(server_path)
manifest.time += 1
manifest.file_sums["license.txt"] = md5sum(os.path.join(server_path, "license.txt"))
manifest.save(server_path)
self.client.run("install Hello/0.1@lasote/testing --update")
self._check_export_installed_folder(mode, updated=True)
def exports_sources_old_c_src_test(self):
conanfile = """
import os
from conans import ConanFile
class HelloConan(ConanFile):
exports_sources = "*"
def build(self):
# won't be run in create but in the install from remote, we are emulating old .c_src
# in the package
if not os.environ.get("SKIP_THIS"):
# This dir has to exists after the install
assert(os.path.exists("modules/Hello/projects/Hello/myfile.txt"))
"""
# Fake old package layout with .c_src
self.client.save({"conanfile.py": conanfile,
".c_src/modules/Hello/projects/Hello/myfile.txt": "contents"})
with tools.environment_append({"SKIP_THIS": "1"}):
self.client.run("create . Hello/0.1@lasote/channel")
self.client.run("upload Hello/0.1@lasote/channel --all")
self.client.run('remove "*" -f')
self.client.run("install Hello/0.1@lasote/channel --build")
| {
"content_hash": "43bc353bd806db69ce346041df3456bf",
"timestamp": "",
"source": "github",
"line_count": 425,
"max_line_length": 99,
"avg_line_length": 43.510588235294115,
"alnum_prop": 0.6009625784122864,
"repo_name": "birsoyo/conan",
"id": "1145b0059c3b602c0f09fa277b8d723a32f1ec93",
"size": "18492",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "conans/test/integration/export_sources_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1100"
},
{
"name": "Groovy",
"bytes": "6251"
},
{
"name": "Python",
"bytes": "3101477"
},
{
"name": "Shell",
"bytes": "1864"
}
],
"symlink_target": ""
} |
from io import StringIO
from .output import Output
from .section_output import SectionOutput
class BufferedOutput(Output):
def __init__(self, decorated: bool = False, supports_utf8: bool = True) -> None:
super().__init__(decorated=decorated)
self._buffer = StringIO()
self._supports_utf8 = supports_utf8
def fetch(self) -> str:
"""
Empties the buffer and returns its content.
"""
content = self._buffer.getvalue()
self._buffer = StringIO()
return content
def clear(self) -> None:
"""
Empties the buffer.
"""
self._buffer = StringIO()
def supports_utf8(self) -> bool:
return self._supports_utf8
def set_supports_utf8(self, supports_utf8: bool) -> None:
self._supports_utf8 = supports_utf8
def section(self) -> SectionOutput:
return SectionOutput(
self._buffer,
self._section_outputs,
verbosity=self.verbosity,
decorated=self.is_decorated(),
formatter=self.formatter,
)
def _write(self, message: str, new_line: bool = False) -> None:
self._buffer.write(message)
if new_line:
self._buffer.write("\n")
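# Hedged usage sketch (illustration only, not part of the original file):
#
#     out = BufferedOutput()
#     out.write("hello", new_line=True)
#     assert out.fetch() == "hello\n"
#     assert out.fetch() == ""   # fetch() empties the buffer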
| {
"content_hash": "91f3f9dab70eeb658d79944eb5fcb463",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 84,
"avg_line_length": 26.291666666666668,
"alnum_prop": 0.5800316957210776,
"repo_name": "sdispater/cleo",
"id": "e0c14bef9ec6395eec893fbf9e3f3d98fcae9e6b",
"size": "1262",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleo/io/outputs/buffered_output.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "335366"
}
],
"symlink_target": ""
} |