import pytest
import os
import dj_haystack_url
def test_parser_for_xapian_backend_with_relative_path():
config = dj_haystack_url.parse('xapian:relative/path/to/xapian_index')
expected = {
'ENGINE': 'xapian_backend.XapianEngine',
'PATH': 'relative/path/to/xapian_index',
}
assert config == expected
def test_parser_for_xapian_backend_with_absolute_path():
config = dj_haystack_url.parse('xapian:/absolute/path/to/xapian_index')
expected = {
'ENGINE': 'xapian_backend.XapianEngine',
'PATH': '/absolute/path/to/xapian_index',
}
assert config == expected
def test_parser_for_xapian_backend_with_user_home_in_path():
config = dj_haystack_url.parse('xapian:~/path/under/home/dir')
home_dir = os.path.expanduser('~')
expected = {
'ENGINE': 'xapian_backend.XapianEngine',
'PATH': os.path.join(home_dir, 'path/under/home/dir'),
}
assert config == expected
def test_parser_for_xapian_backend_with_empty_path():
with pytest.raises(ValueError) as err:
dj_haystack_url.parse('xapian:')
assert str(err.value) == 'PATH value cannot be empty for the xapian backend'
| {
"content_hash": "16c93793644f8d3ec60ccfaf21f8f215",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 80,
"avg_line_length": 28.390243902439025,
"alnum_prop": 0.6615120274914089,
"repo_name": "simpleenergy/dj-haystack-url",
"id": "6b26e4ce1a6e6259fb97236f3bae8b96d234c1cc",
"size": "1164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_xapian_parsing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1270"
},
{
"name": "Python",
"bytes": "7952"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.core.urlresolvers import reverse
from django import http
from django.test.utils import override_settings
from mox3.mox import IsA # noqa
from openstack_dashboard import api
from openstack_dashboard.test import helpers as test
from openstack_dashboard.dashboards.admin.images import tables
class ImageCreateViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.glance: ('image_list_detailed',)})
def test_admin_image_create_view_uses_admin_template(self):
filters = {'disk_format': 'aki'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
filters = {'disk_format': 'ari'}
api.glance.image_list_detailed(
IsA(http.HttpRequest), filters=filters).AndReturn(
[self.images.list(), False, False])
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:admin:images:create'))
self.assertTemplateUsed(res, 'admin/images/create.html')
class ImagesViewTest(test.BaseAdminViewTests):
@test.create_stubs({api.glance: ('image_list_detailed',),
api.keystone: ('tenant_list',)})
def test_images_list(self):
filters = {'is_public': None}
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
paginate=True,
filters=filters,
sort_dir='asc',
sort_key='name',
reversed_order=False) \
.AndReturn([self.images.list(),
False, False])
# Test tenant list
api.keystone.tenant_list(IsA(http.HttpRequest)).\
AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
res = self.client.get(
reverse('horizon:admin:images:index'))
self.assertContains(res, 'test_tenant', 8, 200)
self.assertTemplateUsed(res, 'admin/images/index.html')
self.assertEqual(len(res.context['images_table'].data),
len(self.images.list()))
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.glance: ('image_list_detailed',),
api.keystone: ('tenant_list',)})
def test_images_list_get_pagination(self):
images = self.images.list()[:5]
filters = {'is_public': None}
kwargs = {'paginate': True, 'filters': filters,
'sort_dir': 'asc', 'sort_key': 'name',
'reversed_order': False}
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
**kwargs) \
.AndReturn([images, True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
**kwargs) \
.AndReturn([images[:2], True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[2].id,
**kwargs) \
.AndReturn([images[2:4], True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[4].id,
**kwargs) \
.AndReturn([images[4:], True, True])
# Test tenant list
api.keystone.tenant_list(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
url = reverse('horizon:admin:images:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['images_table'].data),
len(images))
self.assertTemplateUsed(res, 'admin/images/index.html')
self.assertContains(res, 'test_tenant', 6, 200)
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
params = "=".join([tables.AdminImagesTable._meta.pagination_param,
images[2].id])
url = "?".join([reverse('horizon:admin:images:index'), params])
res = self.client.get(url)
# get second page (items 2-4)
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
self.assertContains(res, 'test_tenant', 3, 200)
params = "=".join([tables.AdminImagesTable._meta.pagination_param,
images[4].id])
url = "?".join([reverse('horizon:admin:images:index'), params])
res = self.client.get(url)
# get third page (item 5)
self.assertEqual(len(res.context['images_table'].data),
1)
self.assertContains(res, 'test_tenant', 2, 200)
@override_settings(API_RESULT_PAGE_SIZE=2)
@test.create_stubs({api.glance: ('image_list_detailed',),
api.keystone: ('tenant_list',)})
def test_images_list_get_prev_pagination(self):
images = self.images.list()[:3]
filters = {'is_public': None}
kwargs = {'paginate': True, 'filters': filters,
'sort_dir': 'asc', 'sort_key': 'name'}
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
reversed_order=False,
**kwargs) \
.AndReturn([images, True, False])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=None,
reversed_order=False,
**kwargs) \
.AndReturn([images[:2], True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[2].id,
reversed_order=False,
**kwargs) \
.AndReturn([images[2:], True, True])
api.glance.image_list_detailed(IsA(http.HttpRequest),
marker=images[2].id,
reversed_order=True,
**kwargs) \
.AndReturn([images[:2], True, True])
# Test tenant list
api.keystone.tenant_list(IsA(http.HttpRequest)).MultipleTimes().\
AndReturn([self.tenants.list(), False])
self.mox.ReplayAll()
url = reverse('horizon:admin:images:index')
res = self.client.get(url)
# get all
self.assertEqual(len(res.context['images_table'].data),
len(images))
self.assertTemplateUsed(res, 'admin/images/index.html')
self.assertContains(res, 'test_tenant', 4, 200)
res = self.client.get(url)
# get first page with 2 items
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
self.assertContains(res, 'test_tenant', 3, 200)
params = "=".join([tables.AdminImagesTable._meta.pagination_param,
images[2].id])
url = "?".join([reverse('horizon:admin:images:index'), params])
res = self.client.get(url)
# get second page (item 3)
self.assertEqual(len(res.context['images_table'].data), 1)
self.assertContains(res, 'test_tenant', 2, 200)
params = "=".join([tables.AdminImagesTable._meta.prev_pagination_param,
images[2].id])
url = "?".join([reverse('horizon:admin:images:index'), params])
res = self.client.get(url)
# prev back to get first page with 2 items
self.assertEqual(len(res.context['images_table'].data),
settings.API_RESULT_PAGE_SIZE)
self.assertContains(res, 'test_tenant', 3, 200)
| {
"content_hash": "253f116449fe62d055204a9c50508d87",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 79,
"avg_line_length": 45.95,
"alnum_prop": 0.5267803167694354,
"repo_name": "ankur-gupta91/horizon-net-ip",
"id": "42c5e797826bd3c61ea776e020ce6a7f4c400c28",
"size": "8876",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/admin/images/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "294011"
},
{
"name": "HTML",
"bytes": "1292070"
},
{
"name": "JavaScript",
"bytes": "3301345"
},
{
"name": "Makefile",
"bytes": "6753"
},
{
"name": "Python",
"bytes": "13673798"
},
{
"name": "Shell",
"bytes": "42875"
}
],
"symlink_target": ""
} |
from dynaconf.utils.parse_conf import parse_conf_data
def parse_data(data):
"""Return converted data from @int, @float, @bool, @json markers"""
return parse_conf_data(data)
def custom_var_dict(cvarlist):
cvarlist = cvarlist or []
return {
cvar['key']: parse_data(cvar['value'])
for cvar in cvarlist
}
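# --- Usage sketch (not part of the original module; the keys and values below
# are hypothetical). Values tagged with dynaconf markers are converted, while
# plain strings pass through unchanged.
if __name__ == '__main__':
    cvars = [
        {'key': 'MAX_ITEMS', 'value': '@int 10'},
        {'key': 'ENABLE_CACHE', 'value': '@bool true'},
        {'key': 'SITE_NAME', 'value': 'My Quokka Site'},
    ]
    print(custom_var_dict(cvars))
    # -> {'MAX_ITEMS': 10, 'ENABLE_CACHE': True, 'SITE_NAME': 'My Quokka Site'}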
| {
"content_hash": "d291c992890725f16ddfba421cff5ba9",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 71,
"avg_line_length": 24.357142857142858,
"alnum_prop": 0.6451612903225806,
"repo_name": "abnerpc/quokka",
"id": "47510115298ba8b36ed3a38b68de8c5b4cd14d79",
"size": "358",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "quokka/utils/custom_vars.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "104"
},
{
"name": "CSS",
"bytes": "32332"
},
{
"name": "HTML",
"bytes": "119354"
},
{
"name": "JavaScript",
"bytes": "494398"
},
{
"name": "Makefile",
"bytes": "503"
},
{
"name": "Python",
"bytes": "199573"
},
{
"name": "Shell",
"bytes": "12305"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
HEADRequest,
get_element_by_attribute,
parse_iso8601,
)
class YesJapanIE(InfoExtractor):
_VALID_URL = r'https?://(?:www\.)?yesjapan\.com/video/(?P<slug>[A-Za-z0-9\-]*)_(?P<id>[A-Za-z0-9]+)\.html'
_TEST = {
'url': 'http://www.yesjapan.com/video/japanese-in-5-20-wa-and-ga-particle-usages_726497834.html',
'md5': 'f0be416314e5be21a12b499b330c21cf',
'info_dict': {
'id': '726497834',
'title': 'Japanese in 5! #20 - WA And GA Particle Usages',
'description': 'This should clear up some issues most students of Japanese encounter with WA and GA....',
'ext': 'mp4',
'timestamp': 1416391590,
'upload_date': '20141119',
            'thumbnail': r're:^https?://.*\.jpg$',
}
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._og_search_title(webpage)
video_url = self._og_search_video_url(webpage)
description = self._og_search_description(webpage)
thumbnail = self._og_search_thumbnail(webpage)
timestamp = None
submit_info = get_element_by_attribute('class', 'pm-submit-data', webpage)
if submit_info:
timestamp = parse_iso8601(self._search_regex(
r'datetime="([^"]+)"', submit_info, 'upload date', fatal=False, default=None))
# attempt to resolve the final URL in order to get a proper extension
redirect_req = HEADRequest(video_url)
req = self._request_webpage(
redirect_req, video_id, note='Resolving final URL', errnote='Could not resolve final URL', fatal=False)
if req:
video_url = req.geturl()
formats = [{
'format_id': 'sd',
'url': video_url,
}]
return {
'id': video_id,
'title': title,
'formats': formats,
'description': description,
'timestamp': timestamp,
'thumbnail': thumbnail,
}
| {
"content_hash": "a0a90090ffe0f762540a9a3ecea898ab",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 117,
"avg_line_length": 35.721311475409834,
"alnum_prop": 0.5663148233134465,
"repo_name": "Electroscholars/P.E.E.R.S",
"id": "112a6c030138e6c7d0e58619d40f3af012e13362",
"size": "2195",
"binary": false,
"copies": "172",
"ref": "refs/heads/master",
"path": "MainWindowArrowTest/youtube_dl/extractor/yesjapan.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "123"
},
{
"name": "Inno Setup",
"bytes": "7102"
},
{
"name": "Python",
"bytes": "2081027"
}
],
"symlink_target": ""
} |
"""
Custom logger handler that adds its output to a text window.
"""
__authors__ = "Nicu Tofan"
__copyright__ = "Copyright 2015, Nicu Tofan"
__credits__ = ["Nicu Tofan"]
__license__ = "3-clause BSD"
__maintainer__ = "Nicu Tofan"
__email__ = "[email protected]"
import logging
class LoggerToWidget(logging.StreamHandler):
"""
Catches the logging output and redirects it to a text box.
Parameters
----------
textctrl : widget
An instance that implements an ``append()`` method with a single
argument (the text to show).
"""
def __init__(self, textctrl):
"""
Constructor.
"""
logging.StreamHandler.__init__(self)
self.textctrl = textctrl
def emit(self, record):
"""
Notified about messages.
"""
msg = self.format(record)
self.textctrl.append(msg)
self.flush()
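# --- Usage sketch (not part of the original module). LoggerToWidget only needs
# an object exposing an ``append(text)`` method, so a tiny stand-in class is
# enough to exercise it outside of a GUI; the names below are hypothetical.
if __name__ == '__main__':
    class FakeTextCtrl(object):
        """Minimal stand-in for a text widget: collects appended lines."""
        def __init__(self):
            self.lines = []
        def append(self, text):
            self.lines.append(text)
    ctrl = FakeTextCtrl()
    handler = LoggerToWidget(ctrl)
    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    demo_logger = logging.getLogger('loggui-demo')
    demo_logger.addHandler(handler)
    demo_logger.warning('hello from LoggerToWidget')
    print(ctrl.lines)  # ['WARNING: hello from LoggerToWidget']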
| {
"content_hash": "9f467716c1a4771b8e88ecf6c1d528a7",
"timestamp": "",
"source": "github",
"line_count": 37,
"max_line_length": 72,
"avg_line_length": 24.35135135135135,
"alnum_prop": 0.5849056603773585,
"repo_name": "TNick/pyl2extra",
"id": "aed457be119b2dc604b192ddd85a4af578d16ada",
"size": "901",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyl2extra/gui/loggui.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "11767"
},
{
"name": "Python",
"bytes": "831896"
},
{
"name": "Shell",
"bytes": "4624"
}
],
"symlink_target": ""
} |
import jsonschema
from girder.exceptions import ValidationException
from girder.utility import setting_utilities
class PluginSettings:
AUTOJOIN = 'autojoin'
@setting_utilities.default(PluginSettings.AUTOJOIN)
def _defaultAutojoin():
return []
@setting_utilities.validator(PluginSettings.AUTOJOIN)
def _validateAutojoin(doc):
autojoinSchema = {
'type': 'array',
'items': {
'type': 'object',
'properties': {
'pattern': {
'type': 'string'
},
'groupId': {
'type': 'string',
'minLength': 1
},
'level': {
'type': 'number'
}
},
'required': ['pattern', 'groupId', 'level']
}
}
try:
jsonschema.validate(doc['value'], autojoinSchema)
except jsonschema.ValidationError as e:
raise ValidationException('Invalid autojoin rules: ' + str(e))
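# --- Illustrative example (not part of the plugin; the pattern and groupId are
# hypothetical). A valid setting value is a list of rules, each requiring a
# 'pattern', a non-empty string 'groupId' and a numeric 'level':
#
#     _validateAutojoin({'value': [
#         {'pattern': '@example.com',
#          'groupId': '5f0c1e2d3a4b5c6d7e8f9a0b',
#          'level': 1},
#     ]})
#
# passes silently, while a rule missing any required key raises
# ValidationException('Invalid autojoin rules: ...').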
| {
"content_hash": "e0668907de2fd68c1142a49ebd2a7e59",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 70,
"avg_line_length": 25.55,
"alnum_prop": 0.5205479452054794,
"repo_name": "Kitware/girder",
"id": "4291ac9d356f97582ad1afa3ac9990877b7204fd",
"size": "1022",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "plugins/autojoin/girder_autojoin/settings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CMake",
"bytes": "26244"
},
{
"name": "CSS",
"bytes": "6537"
},
{
"name": "Dockerfile",
"bytes": "1528"
},
{
"name": "HTML",
"bytes": "14"
},
{
"name": "JavaScript",
"bytes": "1176017"
},
{
"name": "Jinja",
"bytes": "322"
},
{
"name": "Mako",
"bytes": "7571"
},
{
"name": "Pug",
"bytes": "137980"
},
{
"name": "Python",
"bytes": "2018697"
},
{
"name": "Roff",
"bytes": "17"
},
{
"name": "Shell",
"bytes": "3354"
},
{
"name": "Stylus",
"bytes": "48706"
}
],
"symlink_target": ""
} |
import datetime
import os
import glob
from PySide import QtCore, QtGui
from dpa.config import Config
from dpa.action.registry import ActionRegistry
from dpa.notify import Notification, emails_from_unames
from dpa.ptask.area import PTaskArea, PTaskAreaError
from dpa.ptask import PTask
from dpa.queue import get_unique_id, create_queue_task
from dpa.ui.dk.base import BaseDarkKnightDialog, DarkKnightError
from dpa.user import current_username, User
# -----------------------------------------------------------------------------
DK_CONFIG_PATH = "config/notify/dk.cfg"
# -----------------------------------------------------------------------------
class HoudiniDarkKnightDialog(BaseDarkKnightDialog):
# XXX meh.
RENDER_QUEUES = ['cheezwhiz', 'cheddar', 'hold', 'nuke', 'velveeta',
'muenster']
# -------------------------------------------------------------------------
def __init__(self, parent=None):
super(HoudiniDarkKnightDialog, self).__init__(parent=parent)
# ---- controls
controls_widget = self._setup_controls()
scroll_area = QtGui.QScrollArea()
scroll_area.setFocusPolicy(QtCore.Qt.NoFocus)
scroll_area.setWidgetResizable(True)
scroll_area.setWidget(controls_widget)
self.main_layout.addWidget(scroll_area)
self.main_layout.setStretchFactor(scroll_area, 1000)
# ---- submit btn
cancel_btn = QtGui.QPushButton("Cancel")
cancel_btn.clicked.connect(self.close)
submit_btn = QtGui.QPushButton("Submit")
submit_btn.clicked.connect(self.accept)
btn_layout = QtGui.QHBoxLayout()
btn_layout.setContentsMargins(4, 4, 4, 4)
btn_layout.addStretch()
btn_layout.addWidget(cancel_btn)
btn_layout.addWidget(submit_btn)
btn_layout.addStretch()
self.main_layout.addLayout(btn_layout)
self.main_layout.setStretchFactor(btn_layout, 0)
self._version_note_edit.setFocus()
# -------------------------------------------------------------------------
def accept(self):
self.setEnabled(False)
# ---- get the values from the UI
self._frange = self._get_frange_from_controls()
if not self._frange:
self.setEnabled(True)
return
self._frame_list = self._frange.frames
self._render_queue = self._render_queues.currentText()
self._version_note = self._version_note_edit.text()
self._node_to_render = self._write_node_select.itemData(
self._write_node_select.currentIndex())
self._debug_mode = self._debug.isChecked()
if not self._version_note:
self._show_error("Please specify a description of " +
"what's changed in this version.")
self.setEnabled(True)
return
try:
self._render_to_product()
except Exception as e:
self.setEnabled(True)
raise
else:
super(HoudiniDarkKnightDialog, self).accept()
self.setEnabled(True)
# -------------------------------------------------------------------------
def _render_to_product(self):
# get render node reference
render_node = self.session.hou.node(self._node_to_render)
# ---- progress dialog
num_ops = 8
cur_op = 0
progress_dialog = QtGui.QProgressDialog(
"Product render...", "", cur_op, num_ops, self)
progress_dialog.setWindowTitle("Dark Knight is busy...")
progress_dialog.setAutoReset(False)
progress_dialog.setLabelText("Preparing nuke file for rendering...")
progress_dialog.show()
#########################################
# ensure the product has been created
#########################################
progress_dialog.setLabelText("Creating product...")
if not render_node.type().name()=='ifd' or not self._version_note:
raise Exception("The supplied node is not a WriteProduct node.")
print "Creating product for node... " + str(render_node)
ptask_area = PTaskArea.current()
ptask = PTask.get(ptask_area.spec)
if ptask_area.version:
ptask_version = ptask.version(ptask_area.version)
else:
ptask_version = ptask.latest_version
category = 'imgseq'
file_type = 'exr'
product_name = render_node.name()
product_desc = render_node.name() + " mantra render"
product_ver_note = self._version_note
camera_node = self.session.hou.node(render_node.evalParm('camera'))
if not camera_node:
raise Exception("Camera specified is not valid.")
width = camera_node.evalParm("resx")
height = camera_node.evalParm("resy")
resolution = "%sx%s" % (width, height)
create_action_cls = ActionRegistry().get_action('create', 'product')
if not create_action_cls:
raise Exception("Unable to find product creation action.")
create_action = create_action_cls(
product=product_name,
ptask=ptask.spec,
version=ptask_version.number,
category=category,
description=product_desc,
file_type=file_type,
resolution=resolution,
note=product_ver_note,
)
try:
create_action()
except ActionError as e:
raise Exception("Unable to create product: " + str(e))
# provision the ifd directory
try:
create_action.product_repr.area.provision('ifd')
except Exception as e:
raise Exception(
"Unable to create ifd file directory: " + str(e))
ifd_dir = os.path.join(create_action.product_repr.area.path,
'ifd', product_name + '.$F4.ifd')
out_path = os.path.join(create_action.product_repr.area.path,
product_name + '.$F4.' + file_type)
# by default, the mantra frame range has an expression on frame numbers
render_node.parm('f1').deleteAllKeyframes()
render_node.parm('f2').deleteAllKeyframes()
# set frange
render_node.parm('trange').set(1)
render_node.parm('f1').set(self._frange.start)
render_node.parm('f2').set(self._frange.end)
render_node.parm('f3').set(self._frange.step)
# set output
render_node.parm('soho_outputmode').set(1)
render_node.parm('soho_diskfile').set(ifd_dir)
render_node.parm('soho_diskfile').disable(0)
render_node.parm('vm_picture').set(out_path)
render_node.parm('soho_mkpath').set(1)
product_repr = create_action.product_repr
product_repr_area = product_repr.area
cur_op += 1
progress_dialog.setValue(cur_op)
#########################################
# create ifd files
#########################################
progress_dialog.setLabelText("Generating ifd files...")
render_node.parm('execute').pressButton()
ifd_file_list = glob.glob(
os.path.join(
create_action.product_repr.area.path,
'ifd', '*.ifd')
)
for ifd_file in ifd_file_list:
os.chmod(ifd_file, 0770)
cur_op += 1
progress_dialog.setValue(cur_op)
#########################################
# sync current work area to version snapshot to render from
#########################################
progress_dialog.setLabelText("Sync'ing the latest work...")
try:
self.session.save()
self._sync_latest()
except Exception as e:
self._show_error("Unable to save & sync the latest work: " + str(e))
self.setEnabled(True)
progress_dialog.close()
return
cur_op += 1
progress_dialog.setValue(cur_op)
#########################################
# ensure queue directory exists
#########################################
progress_dialog.setLabelText("Provisioning the queue directory...")
try:
product_repr_area.provision('queue')
except Exception as e:
raise DarkKnightError(
"Unable to create queue scripts directory: " + str(e))
cur_op += 1
progress_dialog.setValue(cur_op)
out_dir = product_repr_area.path
ifd_dir = product_repr_area.dir(dir_name='ifd')
queue_dir = product_repr_area.dir(dir_name='queue')
tasks_info_file = os.path.join(queue_dir, 'tasks_info.cfg')
tasks_info_config = Config()
cur_op += 1
progress_dialog.setValue(cur_op)
#########################################
        # building queue scripts
#########################################
progress_dialog.setLabelText("Building the queue script...")
        # dpaset command to run
dpaset_cmd = 'eval "`dpa env ptask {pt}@{vn}`"'.format(
pt=ptask.spec, vn=ptask_version.number)
# write out queue shell scripts
frame_scripts = []
for frame in self._frame_list:
frame_padded = str(frame).zfill(4)
ifd_file = os.path.join(ifd_dir,
"{pn}.{fn}.ifd".format(pn=product_name, fn=frame_padded))
script_path = os.path.join(queue_dir,
"{pn}.{fn}.sh".format(pn=product_name, fn=frame_padded))
out_file = os.path.join(out_dir,
"{pn}.{fn}.{ft}".format(pn=product_name, fn=frame_padded, ft=file_type) )
render_cmd = "/opt/hfs14/bin/mantra -f {ifd} -V 2a".\
format(
ifd=ifd_file
)
with open(script_path, "w") as script_file:
script_file.write("#!/bin/bash\n\n")
# XXX these should happen automatically in the queue...
script_file.write("source /DPA/wookie/dpa/bash/startup.bash\n")
script_file.write("pipeup\n\n")
script_file.write("# set the ptask version to render\n")
script_file.write(dpaset_cmd + "\n\n")
script_file.write("# render!\n")
script_file.write(render_cmd + "\n\n")
frame_scripts.append((frame_padded, script_path, out_file))
os.chmod(script_path, 0770)
cur_op += 1
progress_dialog.setValue(cur_op)
################################################
# submit to the queue
################################################
now = datetime.datetime.now()
task_id_base = get_unique_id(product_repr_area.spec, dt=now)
frame_tasks = []
# create frame tasks
for (frame, frame_script, out_file) in frame_scripts:
progress_dialog.setLabelText(
"Submitting frame: " + frame_script)
task_id = task_id_base + "_" + frame
if not self._debug_mode:
# create tasks, don't actually submit yet
create_queue_task(self._render_queue, frame_script, task_id,
output_file=out_file, submit=False,
log_path=frame_script + '.log')
frame_tasks.append((frame, task_id))
#
# resubmit frame-by-frame because
# group submit seems to be occasionally
# having problems.
os.system("cqresubmittask {qn} {tid}".format(
qn=self._render_queue, tid=task_id))
cur_op += 1
progress_dialog.setValue(cur_op)
################################################
# task info stuff, allows task ids to
# be retrieved with product spec
################################################
progress_dialog.setLabelText("Creating task info file...")
tasks_info_file = os.path.join(queue_dir, 'tasks_info.cfg')
tasks_info_config = Config()
tasks_info_config.add('base_id', task_id_base)
frame_info = Config()
for (frame, task_id) in frame_tasks:
frame_info.add(str(frame), task_id)
tasks_info_config.add('frame_ids', frame_info)
tasks_info_config.write(tasks_info_file)
os.chmod(tasks_info_file, 0660)
cur_op += 1
progress_dialog.setValue(cur_op)
################################################
# email report
################################################
if not self._debug_mode:
# send msg...
msg_title = "Queue submission report: " + \
now.strftime("%Y/%m/%d %H:%M:%S")
msg_body = "Submitted the following tasks for " + \
ptask.spec + ":\n\n"
msg_body += " Description: " + self._version_note + "\n"
msg_body += " Resolution: " + resolution + "\n"
msg_body += " Render queue: " + self._render_queue + "\n"
msg_body += " Frames: " + str(self._frange) + "\n"
msg_body += " Ifd directory: " + ifd_dir + "\n"
msg_body += "\n"
msg_body += " Base task ID: " + task_id_base + "\n"
msg_body += " Product representation: " + \
product_repr.spec + "\n"
msg_body += " Scripts directory: " + queue_dir + "\n"
msg_body += "\n"
dk_config = ptask.area.config(DK_CONFIG_PATH,
composite_ancestors=True, composite_method="append")
recipients = dk_config.get('notify', [])
recipients.append(current_username())
recipients = emails_from_unames(recipients)
notification = Notification(msg_title, msg_body, recipients,
sender=User.current().email)
notification.send_email()
print recipients
cur_op += 1
progress_dialog.setValue(cur_op)
progress_dialog.close()
# -------------------------------------------------------------------------
def _setup_controls(self):
        # ---- version note
version_note_lbl = QtGui.QLabel("Version description:")
self._version_note_edit = QtGui.QLineEdit()
# ---- mantra nodes
write_nodes = [node
for node in self.session.hou.node("/").allSubChildren()
if node.type().name()=='ifd']
if not write_nodes:
raise DarkKnightError("No WriteProduct nodes to render.")
try:
default_node = self.session.hou.selectedNodes()[0]
except:
default_node = write_nodes[0]
write_node_lbl = QtGui.QLabel('Rendering:')
self._write_node_select = QtGui.QComboBox()
default_index = 0
for (i, node) in enumerate(write_nodes):
node_name = node.name()
node_path = node.path()
node_disp = "{pn} ({nn})".format(
pn=node_name, nn=node_path)
self._write_node_select.addItem(node_disp, node_path)
if node_name == default_node.name():
default_index = i
self._write_node_select.setCurrentIndex(default_index)
# ---- frame range
# frange
render_node_path = self._write_node_select.itemData(
self._write_node_select.currentIndex())
render_node = self.session.hou.node(render_node_path)
min_time = render_node.evalParm('f1')
max_time = render_node.evalParm('f2')
start_time = min_time
end_time = max_time
frange_lbl = QtGui.QLabel("Frame range:")
self._make_frame_range_controls(
min_time, max_time, start_time, end_time)
self._frame_step.setValue(render_node.evalParm('f3'))
controls_layout = QtGui.QGridLayout()
# ---- queue
render_queue_lbl = QtGui.QLabel("Render queue:")
self._render_queues = QtGui.QComboBox()
self._render_queues.addItems(self.__class__.RENDER_QUEUES)
# ---- debug
debug_lbl = QtGui.QLabel("Debug mode:")
self._debug = QtGui.QCheckBox("")
#
self.connect(self._write_node_select, QtCore.SIGNAL("currentIndexChanged(const QString&)"), self._updateFrange)
# ---- layout the controls
controls_layout.addWidget(version_note_lbl, 0, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._version_note_edit, 0, 1)
controls_layout.addWidget(write_node_lbl, 1, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._write_node_select, 1, 1)
controls_layout.addWidget(frange_lbl, 2, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._frange_stack, 2, 1, QtCore.Qt.AlignLeft)
controls_layout.addWidget(self._frange_btn, 2, 2, QtCore.Qt.AlignLeft)
controls_layout.addWidget(render_queue_lbl, 3, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._render_queues, 3, 1, QtCore.Qt.AlignLeft)
controls_layout.addWidget(debug_lbl, 4, 0, QtCore.Qt.AlignRight)
controls_layout.addWidget(self._debug, 4, 1, QtCore.Qt.AlignLeft)
controls_layout.setColumnStretch(2, 1000)
controls_vbox = QtGui.QVBoxLayout()
controls_vbox.addLayout(controls_layout)
controls_vbox.addStretch()
controls_widget = QtGui.QWidget()
controls_widget.setLayout(controls_vbox)
return controls_widget
# -------------------------------------------------------------------------
def _updateFrange(self):
render_node_path = self._write_node_select.itemData(
self._write_node_select.currentIndex())
render_node = self.session.hou.node(render_node_path)
self._frame_start.setValue(render_node.evalParm('f1'))
self._frame_end.setValue(render_node.evalParm('f2'))
self._frame_step.setValue(render_node.evalParm('f3'))
| {
"content_hash": "21e3f0e9511ac51f4eb08839d9e32855",
"timestamp": "",
"source": "github",
"line_count": 502,
"max_line_length": 119,
"avg_line_length": 36.14342629482072,
"alnum_prop": 0.5342261904761905,
"repo_name": "Clemson-DPA/dpa-pipe",
"id": "6a8dbfa010154e59391f1affd814da72f89519f2",
"size": "18226",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dpa/ui/dk/houdini.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "802278"
},
{
"name": "Shell",
"bytes": "9817"
}
],
"symlink_target": ""
} |
import random
import numpy as np
from numba.core import types
from .templates import (ConcreteTemplate, AbstractTemplate, AttributeTemplate,
CallableTemplate, Registry, signature)
from numba.np.numpy_support import numpy_version
from numba.core.overload_glue import glue_typing
registry = Registry()
infer = registry.register
infer_global = registry.register_global
infer_getattr = registry.register_attr
# random.random(), random.seed() etc. are not plain functions, they are bound
# methods of a private object. We have to be careful to use a well-known
# object (e.g. the string "random.seed") as a key, not the bound method itself.
# (same for np.random.random(), etc.)
_int_types = sorted(set((types.intp, types.int64)))
# Should we support float32?
_float_types = [types.float64]
#
# Basics
#
def normalize_shape(shape):
if isinstance(shape, types.Integer):
return types.intp, 1
    elif (isinstance(shape, types.BaseTuple) and
          all(isinstance(v, types.Integer) for v in shape)):
ndim = len(shape)
return types.UniTuple(types.intp, ndim), ndim
else:
raise TypeError("invalid size type %s" % (shape,))
class RandomTemplate(CallableTemplate):
"""
A template helper to transparently handle the typing of array-returning
np.random.* functions.
"""
def array_typer(self, scalar_typer, size=None):
prefix = self.key.split('.')[0]
assert prefix in ('np', 'random'), self.key
if size is None:
# Scalar variant
def typer(*args, **kwargs):
return scalar_typer(*args, **kwargs)
else:
# Array variant (only for the 'np.random.*' namespace)
def typer(*args, **kwargs):
if prefix == 'random':
raise TypeError("unexpected size parameter for %r"
% (self.key,))
shape, ndim = normalize_shape(size)
# Type the scalar variant and wrap the result in an array
# of the appropriate dimensionality.
sig = scalar_typer(*args, **kwargs)
if sig is not None:
return signature(
types.Array(sig.return_type, ndim, 'C'),
*(sig.args + (shape,)))
return typer
class ConcreteRandomTemplate(RandomTemplate):
"""
A RandomTemplate subclass using the `cases` attribute as a list of
allowed scalar signatures.
"""
def array_typer(self, size=None):
key = self.key
cases = self.cases
context = self.context
def concrete_scalar_typer(*args, **kwargs):
# Filter out omitted args
while args and args[-1] is None:
args = args[:-1]
return context.resolve_overload(key, cases, args, kwargs)
return RandomTemplate.array_typer(self, concrete_scalar_typer, size)
@glue_typing(random.getrandbits, typing_key="random.getrandbits")
class Random_getrandbits(ConcreteTemplate):
cases = [signature(types.uint64, types.int32)]
@glue_typing(random.random, typing_key="random.random")
@glue_typing(np.random.random, typing_key="np.random.random")
class Random_random(ConcreteRandomTemplate):
cases = [signature(types.float64)]
def generic(self):
def typer(size=None):
return self.array_typer(size)()
return typer
if numpy_version >= (1, 17):
glue_typing(
np.random.random_sample,
typing_key="np.random.random_sample",
)(Random_random)
glue_typing(
np.random.sample,
typing_key="np.random.sample",
)(Random_random)
glue_typing(
np.random.ranf,
typing_key="np.random.ranf",
)(Random_random)
@glue_typing(random.randint, typing_key="random.randint")
class Random_randint(ConcreteTemplate):
cases = [signature(tp, tp, tp) for tp in _int_types]
@glue_typing(np.random.randint, typing_key="np.random.randint")
class Random_randint(ConcreteRandomTemplate):
cases = [signature(tp, tp) for tp in _int_types]
cases += [signature(tp, tp, tp) for tp in _int_types]
def generic(self):
def typer(low, high=None, size=None):
return self.array_typer(size)(low, high)
return typer
@glue_typing(random.randrange, typing_key="random.randrange")
class Random_randrange(ConcreteTemplate):
cases = [signature(tp, tp) for tp in _int_types]
cases += [signature(tp, tp, tp) for tp in _int_types]
cases += [signature(tp, tp, tp, tp) for tp in _int_types]
@glue_typing(random.seed, typing_key="random.seed")
@glue_typing(np.random.seed, typing_key="np.random.seed")
class Random_seed(ConcreteTemplate):
cases = [signature(types.void, types.uint32)]
#
# Distributions
#
@glue_typing(np.random.geometric, typing_key="np.random.geometric")
@glue_typing(np.random.logseries, typing_key="np.random.logseries")
@glue_typing(np.random.zipf, typing_key="np.random.zipf")
class Numpy_geometric(ConcreteRandomTemplate):
cases = [signature(types.int64, tp) for tp in _float_types]
def generic(self):
def typer(a, size=None):
return self.array_typer(size)(a)
return typer
@glue_typing(np.random.binomial, typing_key="np.random.binomial")
@glue_typing(np.random.negative_binomial,
typing_key="np.random.negative_binomial")
class Numpy_negative_binomial(ConcreteRandomTemplate):
cases = [signature(types.int64, types.int64, tp) for tp in _float_types]
def generic(self):
def typer(n, p, size=None):
return self.array_typer(size)(n, p)
return typer
@glue_typing(np.random.poisson, typing_key="np.random.poisson")
class Numpy_poisson(ConcreteRandomTemplate):
cases = [signature(types.int64, tp) for tp in _float_types]
cases += [signature(types.int64)]
def generic(self):
def typer(lam=None, size=None):
return self.array_typer(size)(lam)
return typer
@glue_typing(np.random.exponential, typing_key="np.random.exponential")
@glue_typing(np.random.rayleigh, typing_key="np.random.rayleigh")
class Numpy_exponential(ConcreteRandomTemplate):
cases = [signature(tp, tp) for tp in _float_types]
cases += [signature(tp) for tp in _float_types]
def generic(self):
def typer(scale=None, size=None):
return self.array_typer(size)(scale)
return typer
@glue_typing(np.random.hypergeometric, typing_key="np.random.hypergeometric")
class Numpy_hypergeometric(ConcreteRandomTemplate):
cases = [signature(tp, tp, tp, tp) for tp in _int_types]
def generic(self):
def typer(ngood, nbad, nsample, size=None):
return self.array_typer(size)(ngood, nbad, nsample)
return typer
@glue_typing(np.random.laplace, typing_key="np.random.laplace")
@glue_typing(np.random.logistic, typing_key="np.random.logistic")
@glue_typing(np.random.lognormal, typing_key="np.random.lognormal")
@glue_typing(np.random.normal, typing_key="np.random.normal")
class Numpy_normal(ConcreteRandomTemplate):
cases = [signature(tp, tp, tp) for tp in _float_types]
cases += [signature(tp, tp) for tp in _float_types]
cases += [signature(tp) for tp in _float_types]
def generic(self):
def typer(loc=None, scale=None, size=None):
return self.array_typer(size)(loc, scale)
return typer
@glue_typing(np.random.gamma, typing_key="np.random.gamma")
class Numpy_gamma(ConcreteRandomTemplate):
cases = [signature(tp, tp, tp) for tp in _float_types]
cases += [signature(tp, tp) for tp in _float_types]
def generic(self):
def typer(shape, scale=None, size=None):
return self.array_typer(size)(shape, scale)
return typer
@glue_typing(np.random.triangular, typing_key="np.random.triangular")
class Random_ternary_distribution(ConcreteRandomTemplate):
cases = [signature(tp, tp, tp, tp) for tp in _float_types]
def generic(self):
def typer(left, mode, right, size=None):
return self.array_typer(size)(left, mode, right)
return typer
@glue_typing(np.random.beta, typing_key="np.random.beta")
@glue_typing(np.random.f, typing_key="np.random.f")
@glue_typing(np.random.gumbel, typing_key="np.random.gumbel")
@glue_typing(np.random.uniform, typing_key="np.random.uniform")
@glue_typing(np.random.vonmises, typing_key="np.random.vonmises")
@glue_typing(np.random.wald, typing_key="np.random.wald")
@glue_typing(random.betavariate, typing_key="random.betavariate")
@glue_typing(random.gammavariate, typing_key="random.gammavariate")
@glue_typing(random.gauss, typing_key="random.gauss")
@glue_typing(random.lognormvariate, typing_key="random.lognormvariate")
@glue_typing(random.normalvariate, typing_key="random.normalvariate")
@glue_typing(random.uniform, typing_key="random.uniform")
@glue_typing(random.vonmisesvariate, typing_key="random.vonmisesvariate")
@glue_typing(random.weibullvariate, typing_key="random.weibullvariate")
class Random_binary_distribution(ConcreteRandomTemplate):
cases = [signature(tp, tp, tp) for tp in _float_types]
def generic(self):
def typer(a, b, size=None):
return self.array_typer(size)(a, b)
return typer
@glue_typing(np.random.chisquare, typing_key="np.random.chisquare")
@glue_typing(np.random.pareto, typing_key="np.random.pareto")
@glue_typing(np.random.power, typing_key="np.random.power")
@glue_typing(np.random.standard_gamma, typing_key="np.random.standard_gamma")
@glue_typing(np.random.standard_t, typing_key="np.random.standard_t")
@glue_typing(np.random.weibull, typing_key="np.random.weibull")
@glue_typing(random.expovariate, typing_key="random.expovariate")
@glue_typing(random.paretovariate, typing_key="random.paretovariate")
class Random_unary_distribution(ConcreteRandomTemplate):
cases = [signature(tp, tp) for tp in _float_types]
def generic(self):
def typer(a, size=None):
return self.array_typer(size)(a)
return typer
@glue_typing(np.random.standard_cauchy, typing_key="np.random.standard_cauchy")
@glue_typing(np.random.standard_normal, typing_key="np.random.standard_normal")
@glue_typing(np.random.standard_exponential,
typing_key="np.random.standard_exponential")
class Random_nullary_distribution(ConcreteRandomTemplate):
cases = [signature(tp) for tp in _float_types]
def generic(self):
def typer(size=None):
return self.array_typer(size)()
return typer
@glue_typing(random.triangular, typing_key="random.triangular")
class Random_triangular(ConcreteTemplate):
cases = [signature(tp, tp, tp) for tp in _float_types]
cases += [signature(tp, tp, tp, tp) for tp in _float_types]
# NOTE: some functions can have @overloads in numba.targets.randomimpl,
# and therefore don't need a typing declaration here.
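# --- Usage sketch (not part of this module; assumes numba is installed). The
# declarations above are what let nopython-mode code call these functions with
# the typed signatures, e.g.:
#
#     from numba import njit
#     import numpy as np
#
#     @njit
#     def draw(n):
#         np.random.seed(42)                    # typed as void(uint32)
#         return np.random.normal(0.0, 1.0, n)  # 1-D float64 array of length n
#
#     draw(3)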
| {
"content_hash": "b0e288d4bb0f9f0d62c6f88cf2dc14a7",
"timestamp": "",
"source": "github",
"line_count": 300,
"max_line_length": 79,
"avg_line_length": 36.43666666666667,
"alnum_prop": 0.6743207391821425,
"repo_name": "IntelLabs/numba",
"id": "0dc9cb4394dd8e896129677cc8d4c8b4cf13c053",
"size": "10931",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "numba/core/typing/randomdecl.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "6984"
},
{
"name": "C",
"bytes": "639446"
},
{
"name": "C++",
"bytes": "93702"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Python",
"bytes": "8764393"
},
{
"name": "Shell",
"bytes": "13542"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('fluent_contents', '0001_initial'),
('glamkit_sponsors', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='BeginSponsorBlockItem',
fields=[
('contentitem_ptr', models.OneToOneField(serialize=False, to='fluent_contents.ContentItem', auto_created=True, parent_link=True, primary_key=True)),
('text', models.TextField(help_text=b'HTML is allowed', blank=True)),
],
options={
'db_table': 'contentitem_glamkit_sponsors_beginsponsorblockitem',
'verbose_name': 'Begin Sponsor Block',
},
bases=('fluent_contents.contentitem',),
),
migrations.CreateModel(
name='EndSponsorBlockItem',
fields=[
('contentitem_ptr', models.OneToOneField(serialize=False, to='fluent_contents.ContentItem', auto_created=True, parent_link=True, primary_key=True)),
('text', models.TextField(help_text=b'HTML is allowed', blank=True)),
],
options={
'db_table': 'contentitem_glamkit_sponsors_endsponsorblockitem',
'verbose_name': 'End sponsor block',
},
bases=('fluent_contents.contentitem',),
),
migrations.CreateModel(
name='SponsorPromoItem',
fields=[
('contentitem_ptr', models.OneToOneField(serialize=False, to='fluent_contents.ContentItem', auto_created=True, parent_link=True, primary_key=True)),
('title', models.CharField(help_text='An optional title that will appear at the top of the sponsor logo e.g. Presenting Sponsor.', max_length=120, blank=True)),
('width', models.IntegerField(help_text=b'The width to show the sponsor logo, default 200px', default=200)),
('quality', models.IntegerField(help_text=b'The JPEG quality to use for the sponsor logo, default 85%', default=85)),
('sponsor', models.ForeignKey(to='glamkit_sponsors.Sponsor')),
],
options={
'db_table': 'contentitem_glamkit_sponsors_sponsorpromoitem',
'verbose_name': 'Sponsor promo',
},
bases=('fluent_contents.contentitem',),
),
]
| {
"content_hash": "9744434cfd59ea5f82ab4158d2eb09a4",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 176,
"avg_line_length": 46.58490566037736,
"alnum_prop": 0.5880923450789793,
"repo_name": "ic-labs/django-icekit",
"id": "333c987974f373177403894e08794d0f66cb96dd",
"size": "2493",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "glamkit_sponsors/migrations/0002_beginsponsorblockitem_endsponsorblockitem_sponsorpromoitem.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18019"
},
{
"name": "HTML",
"bytes": "92605"
},
{
"name": "JavaScript",
"bytes": "27803"
},
{
"name": "Python",
"bytes": "1476354"
},
{
"name": "Shell",
"bytes": "37850"
}
],
"symlink_target": ""
} |
"""Embedding functions."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.contrib.layers.python.ops import sparse_feature_cross_op
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
__all__ = [
"safe_embedding_lookup_sparse", "scattered_embedding_lookup",
"scattered_embedding_lookup_sparse", "embedding_lookup_unique",
"embedding_lookup_sparse_with_distributed_aggregation"
]
def safe_embedding_lookup_sparse(embedding_weights,
sparse_ids,
sparse_weights=None,
combiner=None,
default_id=None,
name=None,
partition_strategy="div",
max_norm=None):
"""Lookup embedding results, accounting for invalid IDs and empty features.
The partitioned embedding in `embedding_weights` must all be the same shape
except for the first dimension. The first dimension is allowed to vary as the
vocabulary size is not necessarily a multiple of `P`. `embedding_weights`
may be a `PartitionedVariable` as returned by using
`tf.compat.v1.get_variable()` with a
partitioner.
Invalid IDs (< 0) are pruned from input IDs and weights, as well as any IDs
with non-positive weight. For an entry with no features, the embedding vector
for `default_id` is returned, or the 0-vector if `default_id` is not supplied.
The ids and weights may be multi-dimensional. Embeddings are always aggregated
along the last dimension.
Args:
embedding_weights: A list of `P` float tensors or values representing
partitioned embedding tensors. Alternatively, a `PartitionedVariable`,
created by partitioning along dimension 0. The total unpartitioned shape
should be `[e_0, e_1, ..., e_m]`, where `e_0` represents the vocab size
and `e_1, ..., e_m` are the embedding dimensions.
sparse_ids: `SparseTensor` of shape `[d_0, d_1, ..., d_n]` containing the
ids. `d_0` is typically batch size.
sparse_weights: `SparseTensor` of same shape as `sparse_ids`, containing
float weights corresponding to `sparse_ids`, or `None` if all weights are
be assumed to be 1.0.
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
default.
default_id: The id to use for an entry with no features.
name: A name for this operation (optional).
partition_strategy: A string specifying the partitioning strategy. Currently
`"div"` and `"mod"` are supported. Default is `"div"`.
max_norm: If not None, all embeddings are l2-normalized to max_norm before
combining.
Returns:
Dense tensor of shape `[d_0, d_1, ..., d_{n-1}, e_1, ..., e_m]`.
Raises:
ValueError: if `embedding_weights` is empty.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if embedding_weights is None:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights) # get underlying Variables.
if not isinstance(embedding_weights, list):
embedding_weights = [embedding_weights]
if len(embedding_weights) < 1:
raise ValueError("Missing embedding_weights %s." % embedding_weights)
dtype = sparse_weights.dtype if sparse_weights is not None else None
if isinstance(embedding_weights, variables.PartitionedVariable):
embedding_weights = list(embedding_weights)
embedding_weights = [
ops.convert_to_tensor(w, dtype=dtype) for w in embedding_weights
]
contrib_tensor_util.assert_same_float_dtype(embedding_weights +
[sparse_weights])
with ops.name_scope(name, "embedding_lookup", embedding_weights +
[sparse_ids, sparse_weights]) as scope:
# Reshape higher-rank sparse ids and weights to linear segment ids.
original_shape = sparse_ids.dense_shape
original_rank_dim = tensor_shape.Dimension(
tensor_shape.dimension_value(sparse_ids.dense_shape.get_shape()[0]))
original_rank = (
array_ops.size(original_shape)
if original_rank_dim.value is None else original_rank_dim.value)
sparse_ids = sparse_ops.sparse_reshape(sparse_ids, [
math_ops.reduce_prod(
array_ops.slice(original_shape, [0], [original_rank - 1])),
array_ops.gather(original_shape, original_rank - 1)
])
if sparse_weights is not None:
sparse_weights = sparse_tensor.SparseTensor(sparse_ids.indices,
sparse_weights.values,
sparse_ids.dense_shape)
# Prune invalid ids and weights.
sparse_ids, sparse_weights = _prune_invalid_ids(sparse_ids, sparse_weights)
if combiner != "sum":
sparse_ids, sparse_weights = _prune_invalid_weights(
sparse_ids, sparse_weights)
# Fill in dummy values for empty features, if necessary.
sparse_ids, is_row_empty = sparse_ops.sparse_fill_empty_rows(
sparse_ids, default_id or 0)
if sparse_weights is not None:
sparse_weights, _ = sparse_ops.sparse_fill_empty_rows(sparse_weights, 1.0)
result = embedding_ops.embedding_lookup_sparse(
embedding_weights,
sparse_ids,
sparse_weights,
combiner=combiner,
partition_strategy=partition_strategy,
name=None if default_id is None else scope,
max_norm=max_norm)
if default_id is None:
# Broadcast is_row_empty to the same shape as embedding_lookup_result,
# for use in Select.
is_row_empty = array_ops.tile(
array_ops.reshape(is_row_empty, [-1, 1]),
array_ops.stack([1, array_ops.shape(result)[1]]))
result = array_ops.where(
is_row_empty, array_ops.zeros_like(result), result, name=scope)
# Reshape back from linear ids back into higher-dimensional dense result.
final_result = array_ops.reshape(
result,
array_ops.concat([
array_ops.slice(
math_ops.cast(original_shape, dtypes.int32), [0],
[original_rank - 1]),
array_ops.slice(array_ops.shape(result), [1], [-1])
], 0))
final_result.set_shape(
tensor_shape.unknown_shape(
(original_rank_dim - 1).value).concatenate(result.get_shape()[1:]))
return final_result
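# --- Usage sketch (not part of this module; assumes TF 1.x with contrib). With
# a [vocab_size, dim] weight matrix and a batch of 3 rows where row 1 has no
# features and row 2 only has an invalid id, both of those rows come back as
# zero vectors because default_id is None:
#
#     weights = tf.get_variable("emb", shape=[10, 4])
#     ids = tf.SparseTensor(indices=[[0, 0], [2, 0]], values=[3, -1],
#                           dense_shape=[3, 1])
#     out = safe_embedding_lookup_sparse(weights, ids, combiner="mean")
#     # out has shape [3, 4]; out[1] and out[2] are all zeros.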
def _prune_invalid_ids(sparse_ids, sparse_weights):
"""Prune invalid IDs (< 0) from the input ids and weights."""
is_id_valid = math_ops.greater_equal(sparse_ids.values, 0)
if sparse_weights is not None:
is_id_valid = math_ops.logical_and(
is_id_valid,
array_ops.ones_like(sparse_weights.values, dtype=dtypes.bool))
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_id_valid)
if sparse_weights is not None:
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_id_valid)
return sparse_ids, sparse_weights
def _prune_invalid_weights(sparse_ids, sparse_weights):
"""Prune invalid weights (< 0) from the input ids and weights."""
if sparse_weights is not None:
is_weights_valid = math_ops.greater(sparse_weights.values, 0)
sparse_ids = sparse_ops.sparse_retain(sparse_ids, is_weights_valid)
sparse_weights = sparse_ops.sparse_retain(sparse_weights, is_weights_valid)
return sparse_ids, sparse_weights
def scattered_embedding_lookup(params,
values,
dimension,
name=None,
hash_key=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows for us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance 10 parameters can be
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`. Each
tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if dimension is None:
raise ValueError("You must specify dimension.")
return _sampled_scattered_embedding_lookup(
params,
values,
dimension=dimension,
sampled_candidates=None,
hash_key=hash_key,
name=name)
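# --- Concept sketch (not part of this module). The docstring above describes
# each embedding component of a value v as the weight whose index is a
# fingerprint of the pair (v, i). A plain-Python analogue, using hash() as a
# stand-in for FingerprintCat64, looks roughly like:
#
#     def toy_scattered_lookup(flat_params, value, dimension):
#         # flat_params: 1-D list of weights, value: any hashable token.
#         return [flat_params[hash((value, i)) % len(flat_params)]
#                 for i in range(dimension)]
#
# The real op computes the fingerprints with sparse_feature_cross and supports
# parameters partitioned across several rank-1 tensors.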
def _sampled_scattered_embedding_lookup(params,
values,
dimension=None,
sampled_candidates=None,
hash_key=None,
name=None):
"""Looks up embeddings using parameter hashing for each value in `values`.
This method looks up selected embedding dimensions if `sampled_candidates` is
given, otherwise looks up all dimensions.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
Feature hashing has the pleasant effect of allowing us to compute an embedding
without needing a pre-determined vocabulary, relieving some amount of process
complexity. It also allows for us to maintain embeddings for possibly
trillions of features with a fixed amount of memory.
Note that this is superior to out-of-vocabulary shared "hash buckets" in that
the embedding is extremely likely to be unique for each token as opposed to
being shared across probably-colliding tokens. The price is that we must
compute a hash once for each scalar in the token's embedding as opposed to
once per token.
If `params` is a list, it represents a partition of the embedding parameters.
Each tensor in the list should have the same length, except for the first ones
which may have an additional element. For instance 10 parameters can be
partitioned in 4 tensors with length `[3, 3, 2, 2]`.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`. Each
tensor must be of rank 1 with fully-defined shape.
values: `Tensor` of values to be embedded with shape `[d0, ..., dn]`.
dimension: Embedding dimension. The user must specify either `dimension` or
`sampled_candidates`.
sampled_candidates: An optional `Tensor` of slice indices to keep along the
final dimension with shape `[d0, ..., dn, N]`. If given, `dimension` is
ignored. If `None`, looks up all candidates.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
name: An optional name for this op.
Returns:
A `Tensor` with shape `[d0, ..., dn, dimension]`.
If `sampled_candidates` is given, the output shape is `[d0, ..., dn, N]`
Raises:
ValueError: if dimension is not positive or the partition size is invalid.
"""
if isinstance(params, variables.PartitionedVariable):
params = list(params)
if not isinstance(params, list):
params = [params]
with ops.name_scope(name, "scattered_embedding_lookup",
params + [dimension, values]):
# Flatten the values
values_shape = array_ops.shape(values)
values = array_ops.reshape(values, [-1, 1])
if sampled_candidates is None:
if dimension is None:
raise ValueError(
"You must specify either dimension or sampled_candidates.")
if dimension <= 0:
raise ValueError("Dimension must be >0. Given is %d" % dimension)
sampled_candidates = array_ops.tile(
array_ops.expand_dims(math_ops.range(0, dimension), 0),
array_ops.shape(values))
else:
dimension = array_ops.shape(sampled_candidates)[math_ops.subtract(
array_ops.rank(sampled_candidates), 1)]
sampled_candidates_shape = array_ops.shape(sampled_candidates)
dimension_tensor = array_ops.reshape(
dimension, shape=[
1,
])
expected_shape = array_ops.concat([values_shape, dimension_tensor], 0)
with ops.control_dependencies([
control_flow_ops.Assert(
math_ops.reduce_all(
math_ops.equal(sampled_candidates_shape, expected_shape)),
[
"The shape of sampled_candidates: ", sampled_candidates_shape,
" does not match the shape of values: ", values_shape
])
]):
# Flatten sampled_candidates, same way as values are flattened.
sampled_candidates = array_ops.reshape(sampled_candidates,
[-1, dimension])
num_partitions = len(params)
partition_sizes = []
for p in range(num_partitions):
shape = params[p].get_shape()
shape.assert_has_rank(1)
shape.assert_is_fully_defined()
partition_sizes.append(tensor_shape.dimension_value(shape[0]))
num_params = sum(partition_sizes) # Total number of parameters.
# Assert the size of each partition.
for p in range(num_partitions):
expected_size = (num_params - p - 1) // num_partitions + 1
if partition_sizes[p] != expected_size:
raise ValueError("Tensor %d in params has size %d, expected %d." %
(p, partition_sizes[p], expected_size))
# With two values v1 and v2 and 3 dimensions, we will cross
# [[0, 1, 2], [0, 1, 2]] with [[v1], [v2]].
tensors_to_cross = [sampled_candidates, values]
ids = sparse_feature_cross_op.sparse_feature_cross(
tensors_to_cross,
hashed_output=True,
num_buckets=num_params,
hash_key=hash_key)
ids = sparse_ops.sparse_tensor_to_dense(ids)
# No need to validate the indices since we have checked the params
# dimensions and we know the largest id.
result = embedding_ops.embedding_lookup(
params, ids, partition_strategy="div")
return array_ops.reshape(result,
array_ops.concat([values_shape, [dimension]], 0))
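# Illustrative sketch (not part of the original module): one way the hashed
# lookup above can be exercised, assuming a TF1-style graph context. The
# parameter tensor acts as a flat pool of weights; each (value, i) pair is
# fingerprinted to select one scalar from that pool, so no [vocab, dimension]
# embedding matrix is ever materialized. All names and values below are
# assumptions made for the example only.
def _example_sampled_scattered_embedding_lookup():
  params = constant_op.constant([0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8])
  values = constant_op.constant(["token_a", "token_b"])
  # Each value gets a 4-dimensional embedding hashed out of the 8 parameters.
  return _sampled_scattered_embedding_lookup(params, values, dimension=4)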
def scattered_embedding_lookup_sparse(params,
sparse_values,
dimension,
combiner=None,
default_value=None,
name=None,
hash_key=None):
"""Looks up embeddings of a sparse feature using parameter hashing.
See `tf.contrib.layers.scattered_embedding_lookup` for embedding with hashing.
Args:
params: A `Tensor`, `list` of `Tensors`, or `PartitionedVariable`. Each
tensor must be of rank 1 with fully-defined shape.
sparse_values: A 2-D `SparseTensor` containing the values to be embedded.
Some rows may be empty.
dimension: Embedding dimension
combiner: A string specifying how to combine embedding results for each
entry. Currently "mean", "sqrtn" and "sum" are supported, with "mean" the
default.
default_value: The value to use for an entry with no features.
name: An optional name for this op.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
Returns:
Dense tensor with shape [N, dimension] with N the number of rows in
sparse_values.
Raises:
TypeError: If sparse_values is not a SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if isinstance(params, variables.PartitionedVariable):
params = list(params)
if not isinstance(params, list):
params = [params]
if not isinstance(sparse_values, sparse_tensor.SparseTensor):
raise TypeError("sparse_values must be SparseTensor")
with ops.name_scope(name, "scattered_embedding_lookup_sparse",
params + [sparse_values]) as scope:
# Fill in the empty rows.
if default_value is None:
# Random default values to reduce the risk of collision.
if sparse_values.dtype == dtypes.string:
default_value = "6ZxWzWOHxZ"
else:
default_value = 1288896567
sparse_values, _ = sparse_ops.sparse_fill_empty_rows(
sparse_values, default_value)
segment_ids = sparse_values.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
values = sparse_values.values
values, idx = array_ops.unique(values)
embeddings = scattered_embedding_lookup(
params, values, dimension, hash_key=hash_key)
if combiner == "sum":
embeddings = math_ops.sparse_segment_sum(
embeddings, idx, segment_ids, name=scope)
elif combiner == "mean":
embeddings = math_ops.sparse_segment_mean(
embeddings, idx, segment_ids, name=scope)
elif combiner == "sqrtn":
embeddings = math_ops.sparse_segment_sqrt_n(
embeddings, idx, segment_ids, name=scope)
else:
raise ValueError("Combiner must be one of 'mean', 'sqrtn' or 'sum'.")
return embeddings
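# Illustrative sketch (not part of the original module): embedding a ragged
# batch of string features with the hashed sparse lookup above. The weight
# pool, feature strings and sizes are assumptions for the example; in
# practice `params` would be a trained variable.
def _example_scattered_embedding_lookup_sparse():
  params = array_ops.zeros([1000])  # flat pool of hashed embedding weights
  sp = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=["apple", "pear", "banana"],
      dense_shape=[2, 2])
  # Returns a [2, 16] dense tensor: one 16-d embedding per input row, with
  # each row's feature embeddings averaged together.
  return scattered_embedding_lookup_sparse(
      params, sp, dimension=16, combiner="mean")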
def embedding_lookup_unique(params, ids, partition_strategy="mod", name=None):
"""Version of embedding_lookup that avoids duplicate lookups.
This can save communication in the case of repeated ids.
  Same interface as embedding_lookup, except that it supports
  multi-dimensional `ids`, which avoids having to reshape the input/output
  to fit `gather`.
Args:
params: A list of tensors with the same shape and type, or a
`PartitionedVariable`. Shape `[index, d1, d2, ...]`.
ids: A one-dimensional `Tensor` with type `int32` or `int64` containing the
ids to be looked up in `params`. Shape `[ids1, ids2, ...]`.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`.
name: A name for this operation (optional).
Returns:
A `Tensor` with the same type as the tensors in `params` and dimension of
`[ids1, ids2, d1, d2, ...]`.
Raises:
ValueError: If `params` is empty.
"""
with ops.name_scope(name, "EmbeddingLookupUnique", [params, ids]):
ids = ops.convert_to_tensor(ids)
shape = array_ops.shape(ids)
ids_flat = array_ops.reshape(ids,
math_ops.reduce_prod(shape, keepdims=True))
unique_ids, idx = array_ops.unique(ids_flat)
unique_embeddings = embedding_ops.embedding_lookup(params, unique_ids,
partition_strategy)
embeds_flat = array_ops.gather(unique_embeddings, idx)
embed_shape = array_ops.concat(
[shape, array_ops.shape(unique_embeddings)[1:]], 0)
embeds = array_ops.reshape(embeds_flat, embed_shape)
embeds.set_shape(ids.get_shape().concatenate(
unique_embeddings.get_shape()[1:]))
return embeds
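# Illustrative sketch (not part of the original module): with heavily repeated
# ids, embedding_lookup_unique gathers each distinct id only once and then
# scatters the results back, which can cut traffic to remote parameter shards.
# The shapes and id values below are assumptions for the example.
def _example_embedding_lookup_unique():
  params = array_ops.ones([100, 8])
  ids = constant_op.constant([[3, 3, 3], [7, 3, 7]])  # only two distinct ids
  # Output shape is [2, 3, 8]; only ids 3 and 7 are actually gathered.
  return embedding_lookup_unique(params, ids)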
def _sampled_scattered_embedding_lookup_sparse(params,
sp_values,
dimension=None,
sampled_candidates=None,
hash_key=None,
with_sign_hash=False,
name=None):
"""Looks up embeddings using parameter hashing for sparse values.
This method looks up selected embedding dimensions if `sampled_candidates` is
given, otherwise looks up all dimensions.
The i-th embedding component of a value v in `values` is found by retrieving
the weight whose index is a fingerprint of the pair (v,i).
The concept is explored as "feature hashing" for model compression in this
paper: http://arxiv.org/pdf/1504.04788.pdf
This is logically equivalent to:
* Transforming `sp_values` (which has shape `[d0, d1]`) into a one-hot
`Tensor` of shape `[d0, N]`.
* Multiplying with a `Tensor` `h` of shape `[N, dimension]`, where
`h(i, j) = params[hash(i, j)]`.
Args:
params: A float `Tensor` with rank 1 and fully-defined shape.
sp_values: A 2D `SparseTensor` to be embedded with shape `[d0, d1]`.
dimension: An int `Tensor` of the final dimension. The user needs to provide
either `dimension` or `sampled_candidates`.
sampled_candidates: An optional `Tensor` of column indices to keep along the
final dimension with shape `[d0, N]`. If given, `dimension` is ignored. If
`None`, looks up all candidates.
hash_key: Specify the hash_key that will be used by the `FingerprintCat64`
function to combine the crosses fingerprints on SparseFeatureCrossOp
(optional).
with_sign_hash: A `bool` indicating whether `h(i, j)` should be multiplied
by `+1` or `-1`, where the value selected is determined by hashing `(i,
j)`. This is often necessary to remove bias resulting from hash
collisions.
name: An optional name for this op.
Returns:
A `Tensor` of shape `[d0, dimension]`.
If `sampled_candidates` is given, the output shape is `[d0, N]`.
Raises:
TypeError: If sp_values is not `SparseTensor`.
ValueError: If both `dimension` and `sampled_candidates` are `None`.
"""
if not isinstance(sp_values, sparse_tensor.SparseTensor):
raise TypeError("sp_values must be SparseTensor")
with ops.name_scope(
name=name,
default_name="sampled_scattered_embedding_lookup_sparse",
values=[sp_values, params, dimension, sampled_candidates]) as name_scope:
segment_ids = sp_values.indices[:, 0]
if sampled_candidates is not None:
# Tile sampled_candidates so there is one line corresponding to each
# element in sp_values.values
sampled_candidates = array_ops.gather(sampled_candidates, segment_ids)
embeddings = _sampled_scattered_embedding_lookup(
params,
sp_values.values,
dimension=dimension,
sampled_candidates=sampled_candidates,
hash_key=hash_key,
name="values_lookup")
if with_sign_hash:
signs = _sampled_scattered_embedding_lookup(
array_ops.constant([-1., 1.]),
sp_values.values,
dimension=dimension,
sampled_candidates=sampled_candidates,
hash_key=hash_key,
name="signs_lookup")
embeddings = math_ops.multiply(signs, embeddings, name="signs_hash")
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
num_segments = array_ops.shape(sp_values)[0]
return math_ops.unsorted_segment_sum(
embeddings, segment_ids, num_segments=num_segments, name=name_scope)
def embedding_lookup_sparse_with_distributed_aggregation(
params,
sp_ids,
sp_weights,
partition_strategy="mod",
name=None,
combiner=None,
max_norm=None):
"""Computes embeddings for the given ids and weights.
Embeddings belonging to same param are aggregated on that device first. This
op is intended to decrease data transmission and improve parallelism. See
`tf.nn.embedding_lookup_sparse` for the functionality and example of this op.
Args:
params: A single tensor representing the complete embedding tensor, or a
list of P tensors all of same shape except for the first dimension,
representing sharded embedding tensors. Alternatively, a
`PartitionedVariable`, created by partitioning along dimension 0. Each
element must be appropriately sized for the given `partition_strategy`.
sp_ids: N x M SparseTensor of int64 ids (typically from FeatureValueToId),
where N is typically batch size and M is arbitrary.
sp_weights: either a SparseTensor of float / double weights, or None to
indicate all weights should be taken to be 1. If specified, sp_weights
must have exactly the same shape and indices as sp_ids.
partition_strategy: A string specifying the partitioning strategy, relevant
if `len(params) > 1`. Currently `"div"` and `"mod"` are supported. Default
is `"mod"`. See `tf.nn.embedding_lookup` for more details.
name: Optional name for the op.
combiner: A string specifying the reduction op. Currently "mean", "sqrtn"
and "sum" are supported. "sum" computes the weighted sum of the embedding
results for each row. "mean" is the weighted sum divided by the total
weight. "sqrtn" is the weighted sum divided by the square root of the sum
of the squares of the weights.
max_norm: If not None, each embedding is normalized to have l2 norm equal to
max_norm before combining.
Returns:
A dense tensor representing the combined embeddings for the
sparse ids. For each row in the dense tensor represented by sp_ids, the op
looks up the embeddings for all ids in that row, multiplies them by the
corresponding weight, and combines these embeddings as specified.
Raises:
TypeError: If sp_ids is not a SparseTensor, or if sp_weights is neither
None nor SparseTensor.
ValueError: If combiner is not one of {"mean", "sqrtn", "sum"}.
"""
if combiner is None:
logging.warn("The default value of combiner will change from \"mean\" "
"to \"sqrtn\" after 2016/11/01.")
combiner = "mean"
if combiner not in ("mean", "sqrtn", "sum"):
raise ValueError("combiner must be one of 'mean', 'sqrtn' or 'sum'")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
if not isinstance(sp_ids, sparse_tensor.SparseTensor):
raise TypeError("sp_ids must be SparseTensor")
ignore_weights = sp_weights is None
if not ignore_weights:
if not isinstance(sp_weights, sparse_tensor.SparseTensor):
raise TypeError("sp_weights must be either None or SparseTensor")
sp_ids.values.get_shape().assert_is_compatible_with(
sp_weights.values.get_shape())
sp_ids.indices.get_shape().assert_is_compatible_with(
sp_weights.indices.get_shape())
sp_ids.dense_shape.get_shape().assert_is_compatible_with(
sp_weights.dense_shape.get_shape())
# TODO(yleon): Add enhanced node assertions to verify that sp_ids and
# sp_weights have equal indices and shapes.
with ops.name_scope(name, "embedding_lookup_sparse",
params + [sp_ids]) as name:
segment_ids = sp_ids.indices[:, 0]
if segment_ids.dtype != dtypes.int32:
segment_ids = math_ops.cast(segment_ids, dtypes.int32)
ids = sp_ids.values
if ignore_weights:
ids, idx = array_ops.unique(ids)
else:
idx = None
weights = None if ignore_weights else sp_weights.values
embeddings = _embedding_lookup_with_distributed_aggregation(
params,
ids,
partition_strategy=partition_strategy,
max_norm=max_norm,
weights=weights,
idx=idx,
segment_ids=segment_ids)
    # Set weights to all ones when weights are ignored.
if ignore_weights:
weights = array_ops.fill([array_ops.shape(segment_ids)[0]], 1)
if weights.dtype != embeddings.dtype:
weights = math_ops.cast(weights, embeddings.dtype)
# Reshape weights.
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(embeddings) - 1, 0), 1)
bcast_weights_shape = array_ops.concat([array_ops.shape(weights), ones], 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
if embeddings.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(embeddings.get_shape().ndims - 1)]))
if combiner == "mean":
weight_sum = math_ops.segment_sum(weights, segment_ids)
embeddings = math_ops.div(embeddings, weight_sum)
elif combiner == "sqrtn":
weights_squared = math_ops.pow(weights, 2)
weight_sum = math_ops.segment_sum(weights_squared, segment_ids)
weight_sum_sqrt = math_ops.sqrt(weight_sum)
embeddings = math_ops.div(embeddings, weight_sum_sqrt)
elif combiner != "sum":
assert False, "Unrecognized combiner"
return embeddings
def _do_gather(params, ids, name=None):
"""Deals with doing gather differently for resource variables."""
if isinstance(params, resource_variable_ops.ResourceVariable):
return params.sparse_read(ids, name=name)
return array_ops.gather(params, ids, name=name)
def _embedding_lookup_with_distributed_aggregation(params,
ids,
partition_strategy="mod",
name=None,
max_norm=None,
weights=None,
idx=None,
segment_ids=None):
"""Lookup helper for embedding_lookup_sparse_with_distributed_aggregation."""
if params is None or params == []: # pylint: disable=g-explicit-bool-comparison
raise ValueError("Need at least one param")
if isinstance(params, variables.PartitionedVariable):
params = list(params) # Iterate to get the underlying Variables.
if not isinstance(params, list):
params = [params]
def maybe_normalize(x):
if max_norm is not None:
if x.get_shape().ndims is not None:
ndims = x.get_shape().ndims
else:
ndims = array_ops.size(array_ops.shape(x))
return clip_ops.clip_by_norm(x, max_norm, axes=list(range(1, ndims)))
return x
with ops.name_scope(name, "embedding_lookup_with_distributed_aggregation",
params + [ids]) as name:
np = len(params) # Number of partitions
# Preserve the resource variable status to avoid accidental dense reads.
if not any(
isinstance(p, resource_variable_ops.ResourceVariable) for p in params):
params = ops.convert_n_to_tensor_or_indexed_slices(params, name="params")
if np == 1:
with ops.colocate_with(params[0]):
ret = maybe_normalize(_do_gather(params[0], ids))
ignore_weights = weights is None
if not ignore_weights:
if weights.dtype != ret.dtype:
weights = math_ops.cast(weights, ret.dtype)
# Reshape to allow broadcast
ones = array_ops.fill(
array_ops.expand_dims(array_ops.rank(ret) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(
[array_ops.shape(weights), ones], 0)
orig_weights_shape = weights.get_shape()
weights = array_ops.reshape(weights, bcast_weights_shape)
# Set weights shape after reshape
if ret.get_shape().ndims is not None:
weights.set_shape(
orig_weights_shape.concatenate(
[1 for _ in range(ret.get_shape().ndims - 1)]))
ret *= weights
return math_ops.segment_sum(ret, segment_ids, name=name)
else:
return math_ops.sparse_segment_sum(ret, idx, segment_ids, name=name)
else:
ids = ops.convert_to_tensor(ids, name="ids")
flat_ids = array_ops.reshape(ids, [-1])
original_indices = math_ops.range(array_ops.size(flat_ids))
# Create p_assignments and set new_ids depending on the strategy.
if partition_strategy == "mod":
p_assignments = flat_ids % np
new_ids = flat_ids // np
elif partition_strategy == "div":
# Compute num_total_ids as the sum of dim-0 of params, then assign to
# partitions based on a constant number of ids per partition. Optimize
# if we already know the full shape statically.
dim_0_size = params[0].get_shape().dims[0]
for p in xrange(1, np):
dim_0_size += params[p].get_shape().dims[0]
if dim_0_size.value:
num_total_ids = constant_op.constant(dim_0_size, flat_ids.dtype)
else:
dim_0_sizes = []
for p in xrange(np):
if params[p].get_shape().dims[0].value is not None:
dim_0_sizes.append(params[p].get_shape().dims[0].value)
else:
with ops.colocate_with(params[p]):
dim_0_sizes.append(array_ops.shape(params[p])[0])
num_total_ids = math_ops.reduce_sum(
math_ops.cast(array_ops.stack(dim_0_sizes), flat_ids.dtype))
ids_per_partition = num_total_ids // np
extras = num_total_ids % np
p_assignments = math_ops.maximum(flat_ids // (ids_per_partition + 1),
(flat_ids - extras) //
ids_per_partition)
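        # Worked example (comment added for clarity, not in the original):
        # with num_total_ids=10 and np=4, ids_per_partition=2 and extras=2,
        # so the partition sizes are [3, 3, 2, 2]; id 5 is then assigned to
        # partition max(5 // 3, (5 - 2) // 2) = 1.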
# Emulate a conditional using a boolean indicator tensor
is_in_first_extras_partitions = math_ops.cast(p_assignments < extras,
flat_ids.dtype)
new_ids = (
is_in_first_extras_partitions * (flat_ids %
(ids_per_partition + 1)) +
(1 - is_in_first_extras_partitions) *
((flat_ids - extras) % ids_per_partition))
else:
raise ValueError("Unrecognized partition strategy: " +
partition_strategy)
# Cast partition assignments to int32 for use in dynamic_partition.
# There really should not be more than 2^32 partitions.
p_assignments = math_ops.cast(p_assignments, dtypes.int32)
# Partition list of ids based on assignments into np separate lists
gather_ids = data_flow_ops.dynamic_partition(new_ids, p_assignments, np)
# Similarly, partition the original indices.
pindices = data_flow_ops.dynamic_partition(original_indices,
p_assignments, np)
# Do np separate lookups, finding embeddings for plist[p] in params[p]
partitioned_result = []
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result.append(_do_gather(params[p], gather_ids[p]))
ignore_weights = weights is None
if not ignore_weights:
# Partition weights according to pindices.
partitioned_weight = []
for p in xrange(np):
partitioned_weight.append(array_ops.gather(weights, pindices[p]))
# Reshape each partition result.
element_shape = params[0].get_shape()[1:]
for p in params[1:]:
element_shape = element_shape.merge_with(p.get_shape()[1:])
if element_shape.is_fully_defined():
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = array_ops.reshape(
partitioned_result[p],
array_ops.concat([array_ops.shape(pindices[p]), element_shape],
0))
else:
with ops.colocate_with(params[0]):
params_shape = array_ops.shape(params[0])
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = array_ops.reshape(
partitioned_result[p],
array_ops.concat([
array_ops.shape(pindices[p]),
array_ops.slice(params_shape, [1], [-1])
], 0))
# Normalize each partition result.
for p in xrange(np):
with ops.colocate_with(params[p]):
partitioned_result[p] = maybe_normalize(partitioned_result[p])
if not ignore_weights:
# Multiply each partition result with partition weights.
for p in xrange(np):
with ops.colocate_with(params[p]):
if partitioned_weight[p].dtype != partitioned_result[p].dtype:
partitioned_weight[p] = math_ops.cast(partitioned_weight[p],
partitioned_result[p].dtype)
# Reshape partition weights.
ones = array_ops.fill(
array_ops.expand_dims(
array_ops.rank(partitioned_result[p]) - 1, 0), 1)
bcast_weights_shape = array_ops.concat(
[array_ops.shape(partitioned_weight[p]), ones], 0)
orig_weights_shape = partitioned_weight[p].get_shape()
partitioned_weight[p] = array_ops.reshape(partitioned_weight[p],
bcast_weights_shape)
if partitioned_result[p].get_shape().ndims is not None:
partitioned_weight[p].set_shape(
orig_weights_shape.concatenate([
1 for _ in range(partitioned_result[p].get_shape().ndims -
1)
]))
partitioned_result[p] *= partitioned_weight[p]
partitioned_segment_ids = []
for p in xrange(np):
if not ignore_weights:
# Partition segment_ids according to pindices.
p_segment_ids = array_ops.gather(segment_ids, pindices[p])
# Number the p_segment_ids to meet segment_sum's requirements. Note
# that unique_p_segment_ids contains unique segment ids of this
# partition and these ids' order is unchanged.
unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
p_segment_ids)
partitioned_segment_ids.append(unique_p_segment_ids)
# segment_sum this partition's result.
with ops.colocate_with(params[p]):
partitioned_result[p] = math_ops.segment_sum(
partitioned_result[p], unique_p_segment_idx)
else:
          # When ignoring weights, we need to get the indices of elements in
          # idx and segment_ids.
_, exclude_idx = array_ops.setdiff1d(idx, pindices[p])
all_idx = math_ops.range(array_ops.shape(idx)[0])
_, include_idx = array_ops.setdiff1d(all_idx, exclude_idx)
# Gather segment_ids and idx according to indexs.
p_segment_ids = array_ops.gather(segment_ids, include_idx)
p_idx = array_ops.gather(idx, include_idx)
          # Number the p_segment_ids, same as in the weighted case above.
unique_p_segment_ids, unique_p_segment_idx = array_ops.unique(
p_segment_ids)
_, unique_p_idx_idx = array_ops.unique(p_idx)
partitioned_segment_ids.append(unique_p_segment_ids)
with ops.colocate_with(params[p]):
partitioned_result[p] = math_ops.sparse_segment_sum(
partitioned_result[p], unique_p_idx_idx, unique_p_segment_idx)
# Concat each partition's segment_ids and result for final segment_sum.
concat_segment_ids = array_ops.concat(partitioned_segment_ids, 0)
concat_partitioned_result = array_ops.concat(partitioned_result, 0)
return math_ops.unsorted_segment_sum(
concat_partitioned_result,
concat_segment_ids,
math_ops.reduce_max(concat_segment_ids) + 1,
name=name)
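# Illustrative sketch (not part of the original module): calling the
# distributed-aggregation lookup with two parameter shards. The shard shapes
# and ids are assumptions for the example; in practice each shard would be a
# variable pinned to its own parameter server, so the per-shard partial sums
# computed above stay on that device until the final combine.
def _example_distributed_aggregation_lookup():
  shards = [array_ops.ones([5, 4]), array_ops.ones([5, 4])]
  sp_ids = sparse_tensor.SparseTensor(
      indices=[[0, 0], [0, 1], [1, 0]],
      values=constant_op.constant([1, 8, 3], dtype=dtypes.int64),
      dense_shape=[2, 2])
  # Row 0 sums the embeddings of ids 1 and 8; row 1 is the embedding of id 3.
  return embedding_lookup_sparse_with_distributed_aggregation(
      shards, sp_ids, None, combiner="sum")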
| {
"content_hash": "65f7e3a0168095153dd05b6849a8a1fa",
"timestamp": "",
"source": "github",
"line_count": 929,
"max_line_length": 92,
"avg_line_length": 45.02906350914962,
"alnum_prop": 0.6384825014343086,
"repo_name": "ghchinoy/tensorflow",
"id": "14bbe5f9b30ef93149aa1fb317bd07d7dc2e6308",
"size": "42521",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tensorflow/contrib/layers/python/layers/embedding_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3568"
},
{
"name": "Batchfile",
"bytes": "15317"
},
{
"name": "C",
"bytes": "699905"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "67022491"
},
{
"name": "CMake",
"bytes": "206499"
},
{
"name": "Dockerfile",
"bytes": "73602"
},
{
"name": "Go",
"bytes": "1585039"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "836400"
},
{
"name": "Jupyter Notebook",
"bytes": "1665583"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "98194"
},
{
"name": "Objective-C",
"bytes": "94022"
},
{
"name": "Objective-C++",
"bytes": "175222"
},
{
"name": "PHP",
"bytes": "17600"
},
{
"name": "Pascal",
"bytes": "3239"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "48407007"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "4733"
},
{
"name": "Shell",
"bytes": "476920"
},
{
"name": "Smarty",
"bytes": "27495"
},
{
"name": "Swift",
"bytes": "56155"
}
],
"symlink_target": ""
} |
from warnings import warn
from skimage.util.dtype import dtype_range
from .base import Plugin
from ..utils import ClearColormap, update_axes_image
import six
__all__ = ['OverlayPlugin']
def recent_mpl_version():
import matplotlib
version = matplotlib.__version__.split('.')
    # Accept matplotlib 1.2 or newer (including 2.x and later major versions).
    return (int(version[0]), int(version[1])) >= (1, 2)
class OverlayPlugin(Plugin):
"""Plugin for ImageViewer that displays an overlay on top of main image.
The base Plugin class displays the filtered image directly on the viewer.
OverlayPlugin will instead overlay an image with a transparent colormap.
See base Plugin class for additional details.
Attributes
----------
overlay : array
Overlay displayed on top of image. This overlay defaults to a color map
with alpha values varying linearly from 0 to 1.
color : int
Color of overlay.
"""
colors = {'red': (1, 0, 0),
'yellow': (1, 1, 0),
'green': (0, 1, 0),
'cyan': (0, 1, 1)}
def __init__(self, **kwargs):
if not recent_mpl_version():
msg = "Matplotlib >= 1.2 required for OverlayPlugin."
warn(RuntimeWarning(msg))
super(OverlayPlugin, self).__init__(**kwargs)
self._overlay_plot = None
self._overlay = None
self.cmap = None
self.color_names = list(self.colors.keys())
def attach(self, image_viewer):
super(OverlayPlugin, self).attach(image_viewer)
#TODO: `color` doesn't update GUI widget when set manually.
self.color = 0
@property
def overlay(self):
return self._overlay
@overlay.setter
def overlay(self, image):
self._overlay = image
ax = self.image_viewer.ax
if image is None:
ax.images.remove(self._overlay_plot)
self._overlay_plot = None
elif self._overlay_plot is None:
vmin, vmax = dtype_range[image.dtype.type]
self._overlay_plot = ax.imshow(image, cmap=self.cmap,
vmin=vmin, vmax=vmax)
else:
update_axes_image(self._overlay_plot, image)
self.image_viewer.redraw()
@property
def color(self):
return self._color
@color.setter
def color(self, index):
# Update colormap whenever color is changed.
        if isinstance(index, six.string_types):
            if index not in self.color_names:
                raise ValueError("%s not defined in OverlayPlugin.colors"
                                 % index)
            # Accept a color name as well as an integer index.
            index = self.color_names.index(index)
        name = self.color_names[index]
self._color = name
rgb = self.colors[name]
self.cmap = ClearColormap(rgb)
if self._overlay_plot is not None:
self._overlay_plot.set_cmap(self.cmap)
self.image_viewer.redraw()
@property
def filtered_image(self):
"""Return filtered image.
This "filtered image" is used when saving from the plugin.
"""
return self.overlay
def display_filtered_image(self, image):
"""Display filtered image as an overlay on top of image in viewer."""
self.overlay = image
def closeEvent(self, event):
# clear overlay from ImageViewer on close
self.overlay = None
super(OverlayPlugin, self).closeEvent(event)
def output(self):
"""Return the overlaid image.
Returns
-------
overlay : array, same shape as image
The overlay currently displayed.
data : None
"""
return (self.overlay, None)
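# Illustrative sketch (not part of the original module): a hypothetical way to
# attach the plugin to a viewer and overlay a binary mask on the displayed
# image. The ImageViewer import path and the threshold value are assumptions
# made for the example only.
#
#   from skimage import data
#   from viewer import ImageViewer
#
#   image = data.coins()
#   viewer = ImageViewer(image)
#   plugin = OverlayPlugin()
#   viewer += plugin
#   plugin.color = 'red'
#   plugin.display_filtered_image(image > 128)  # mask drawn with ClearColormap
#   viewer.show()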
| {
"content_hash": "87a6ee4c236774672d6f88ec5acf353a",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 79,
"avg_line_length": 29.459016393442624,
"alnum_prop": 0.5948803561491375,
"repo_name": "stefanv/register_gui",
"id": "373d6bf4f796fc63ca47b699c7b5817781837fe9",
"size": "3594",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "viewer/plugins/overlayplugin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "103870"
}
],
"symlink_target": ""
} |
"""
Created on 2015-05-21
@author: Danny<[email protected]>
DannyWork Project
"""
from __future__ import unicode_literals
from django.contrib.sitemaps import Sitemap
from django.core.urlresolvers import reverse
from .models import Blog
class BlogSitemap(Sitemap):
"""
    Blog sitemap.
"""
changefreq = 'never'
priority = 0.8
def items(self):
return Blog.objects.filter(is_deleted=False, is_active=True).order_by('-created')
def lastmod(self, obj):
return obj.created
def location(self, obj):
return reverse('blog_detail', args=[obj.id])
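# Illustrative sketch (not part of the original module): how a sitemap like
# this is typically exposed in a project's URLconf. The module path and URL
# pattern below are assumptions for the example, not taken from this project.
#
#   from django.contrib.sitemaps.views import sitemap
#   from blog.sitemap import BlogSitemap
#
#   urlpatterns += [
#       url(r'^sitemap\.xml$', sitemap, {'sitemaps': {'blog': BlogSitemap}},
#           name='django.contrib.sitemaps.views.sitemap'),
#   ]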
| {
"content_hash": "7d690f614587b011a1965ffb29da86c4",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 89,
"avg_line_length": 19.387096774193548,
"alnum_prop": 0.6755407653910149,
"repo_name": "manyunkai/dannysite4",
"id": "e2b170d64d7b65cd8fff14da60eda87854504d0b",
"size": "627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/blog/sitemap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "203273"
},
{
"name": "HTML",
"bytes": "349894"
},
{
"name": "JavaScript",
"bytes": "239150"
},
{
"name": "PHP",
"bytes": "4398"
},
{
"name": "Python",
"bytes": "129595"
}
],
"symlink_target": ""
} |
import ast
import cPickle
import logging
import numpy
from django.db import models
from django.template import Template, Context
from django.utils import timezone
from django.conf import settings
from django.db.models import Q
from django.db.models.signals import post_delete
from django.dispatch.dispatcher import receiver
logger = logging.getLogger(__name__)
class DataPoint(models.Model):
name = models.CharField(max_length=600)
exact_name = models.CharField(max_length=1000, null=True, blank=True)
created = models.DateTimeField()
options = models.CharField(max_length=100)
homo = models.FloatField()
lumo = models.FloatField()
homo_orbital = models.IntegerField()
energy = models.FloatField()
dipole = models.FloatField()
band_gap = models.FloatField(null=True, blank=True)
vectors = models.ManyToManyField('FeatureVector')
class Meta:
get_latest_by = "created"
def __unicode__(self):
return unicode(self.name)
def save(self, *args, **kwargs):
# Hack to get around the fact that you can not overwrite auto_now_add
if not self.id and not self.created:
self.created = timezone.now()
return super(DataPoint, self).save(*args, **kwargs)
@classmethod
def get_all_data(cls, type=1):
data = DataPoint.objects.filter(band_gap__isnull=False,
exact_name__isnull=False,
vectors__type=type)
M = len(data)
HOMO = numpy.zeros((M, 1))
LUMO = numpy.zeros((M, 1))
GAP = numpy.zeros((M, 1))
vectors = []
for i, x in enumerate(data):
HOMO[i] = x.homo
LUMO[i] = x.lumo
GAP[i] = x.band_gap
vectors.append(x.vectors.get(type=type).vector)
FEATURE = numpy.array(vectors)
return FEATURE, HOMO, LUMO, GAP
class VectorField(models.TextField):
__metaclass__ = models.SubfieldBase
def to_python(self, value):
if type(value) != list:
return ast.literal_eval(value)
else:
return value
def get_prep_value(self, value):
return str(value)
class FeatureVector(models.Model):
NAIVE = 0
DECAY = 1
DECAY_LENGTH = 2
COULOMB = 3
VECTOR_NAMES = (
(NAIVE, "Naive"),
(DECAY, "Decay"),
(DECAY_LENGTH, "Decay_Length"),
(COULOMB, "Coulomb")
)
type = models.IntegerField(choices=VECTOR_NAMES)
exact_name = models.CharField(max_length=1000, null=True, blank=True)
vector = VectorField()
created = models.DateTimeField()
def __unicode__(self):
return unicode(self.exact_name) + ' ' + unicode(self.VECTOR_NAMES[self.type][1])
def save(self, *args, **kwargs):
# Hack to get around the fact that you can not overwrite auto_now_add
if not self.id and not self.created:
self.created = timezone.now()
return super(FeatureVector, self).save(*args, **kwargs)
class Predictor(models.Model):
created = models.DateTimeField(auto_now_add=True)
pickle = models.FileField(upload_to="predictors")
homo_error = models.FloatField()
lumo_error = models.FloatField()
gap_error = models.FloatField()
class Meta:
get_latest_by = "created"
def get_predictors(self):
try:
return self.model
except AttributeError:
logger.info("Loading a new model")
self.model = cPickle.load(self.pickle)
return self.model
class JobTemplate(models.Model):
name = models.CharField(max_length=60)
creator = models.ForeignKey(settings.AUTH_USER_MODEL,
related_name='templates', null=True)
template = models.FileField(upload_to="job_templates")
def read(self):
data = self.template.read()
self.template.seek(0)
return data
def __unicode__(self):
if self.creator is not None:
return "%s:%s" % (self.creator.username, self.name)
return self.name
def get_long_name(self):
return "%s:%d" % (str(self), self.id)
@classmethod
def get_templates(cls, user=None):
if user is not None:
return JobTemplate.objects.filter(Q(creator=user) |
Q(creator__isnull=True))
else:
return JobTemplate.objects.filter(creator__isnull=True)
@classmethod
def render(cls, **kwargs):
if kwargs.get("custom_template"):
template = Template(kwargs.get("template", ''))
else:
base_template = kwargs.get("base_template")
try:
template = Template(base_template.read())
except AttributeError:
template = Template(kwargs.get("template", ''))
c = Context({
"name": kwargs.get("name", ''),
"email": kwargs.get("email", ''),
"nodes": kwargs.get("nodes", 1),
"ncpus": int(kwargs.get("nodes", 1)) * 16,
"time": "%s:00:00" % kwargs.get("walltime", '1'),
"internal": kwargs.get("internal"),
"allocation": kwargs.get("allocation", ''),
})
return template.render(c)
@receiver(post_delete, sender=JobTemplate)
def delete_jobtemplate(sender, instance, **kwargs):
if instance.template:
# Pass false so FileField doesn't save the model.
instance.template.delete(False)
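# Illustrative sketch (not part of the original module): rendering a job
# script from an inline template string. The template text and keyword values
# are assumptions made for the example only.
#
#   rendered = JobTemplate.render(
#       custom_template=True,
#       template="#PBS -N {{ name }}\n#PBS -l nodes={{ nodes }}:ppn={{ ncpus }}",
#       name="benzene_opt",
#       nodes=2,
#       walltime=12,
#   )
#   # -> "#PBS -N benzene_opt\n#PBS -l nodes=2:ppn=32"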
| {
"content_hash": "c040e54fdc6155798305ef530ca51954",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 88,
"avg_line_length": 31.457142857142856,
"alnum_prop": 0.5969118982742961,
"repo_name": "crcollins/chemtools-webapp",
"id": "ca071ffc2d90300792a4621b89fe1c86b9c19804",
"size": "5505",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "402"
},
{
"name": "HTML",
"bytes": "48799"
},
{
"name": "JavaScript",
"bytes": "3298"
},
{
"name": "Python",
"bytes": "438396"
},
{
"name": "Ruby",
"bytes": "1569"
},
{
"name": "Shell",
"bytes": "5732"
}
],
"symlink_target": ""
} |
"""personal URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.contrib import admin
from django.conf.urls.static import static
import views
from resume.views import ResumeView
urlpatterns = [
url(r'^$', views.IndexView.as_view(), name='index'),
url(r'^admin/', include(admin.site.urls)),
url(r'^resume/', include('resume.urls')),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT) | {
"content_hash": "6b52fa2e85acb2650b8855e812a77b5f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 82,
"avg_line_length": 36.5625,
"alnum_prop": 0.7153846153846154,
"repo_name": "SakiFu/personal-website",
"id": "ebc8368c1ae1a136ecfb68f577056f1ed936299f",
"size": "1170",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "personal/personal/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "52462"
},
{
"name": "HTML",
"bytes": "52789"
},
{
"name": "JavaScript",
"bytes": "224789"
},
{
"name": "PHP",
"bytes": "1894"
},
{
"name": "Python",
"bytes": "19860"
}
],
"symlink_target": ""
} |
from __future__ import with_statement
from alembic import context
from sqlalchemy import engine_from_config, pool
from logging.config import fileConfig
import ConfigParser
import os, sys
sys.path.append(os.getcwd())
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)
# for 'autogenerate' support
from db import dfa_db_models
target_metadata = dfa_db_models.Base.metadata
# other values from the config, defined by the needs of env.py,
# can be acquired:
# my_important_option = config.get_main_option("my_important_option")
# ... etc.
enabler_config = ConfigParser.ConfigParser()
enabler_config.read('/etc/saf/enabler_conf.ini')
connection_url = enabler_config.get('dfa_mysql', 'connection')
config.set_main_option('sqlalchemy.url', connection_url)
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL
and not an Engine, though an Engine is acceptable
here as well. By skipping the Engine creation
we don't even need a DBAPI to be available.
Calls to context.execute() here emit the given string to the
script output.
"""
url = config.get_main_option("sqlalchemy.url")
context.configure(url=url, target_metadata=target_metadata)
with context.begin_transaction():
context.run_migrations()
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
engine = engine_from_config(
config.get_section(config.config_ini_section),
prefix='sqlalchemy.',
poolclass=pool.NullPool)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
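# Illustrative sketch (not part of the original file): the same migrations can
# also be driven programmatically instead of through the `alembic` CLI. The
# ini path below is an assumption for the example.
#
#   from alembic import command
#   from alembic.config import Config
#
#   command.upgrade(Config("alembic.ini"), "head")  # runs run_migrations_online()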
| {
"content_hash": "8be3321c441ee972cdf48881ee89f080",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 69,
"avg_line_length": 27.5,
"alnum_prop": 0.7113636363636363,
"repo_name": "CiscoSystems/fabric_enabler",
"id": "29e249d7249cb6c0f3c3bd78096a40588f343148",
"size": "2200",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dfa/db/migration/env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "920145"
},
{
"name": "Shell",
"bytes": "8220"
}
],
"symlink_target": ""
} |
"""
Use libraries from a virtualenv (by modifying sys.path) in production.
"""
import os
import sys
def setup_path() -> None:
if os.path.basename(sys.prefix) != "zulip-py3-venv":
BASE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
venv = os.path.join(BASE_DIR, "zulip-py3-venv")
activate_this = os.path.join(venv, "bin", "activate_this.py")
activate_locals = dict(__file__=activate_this)
exec(open(activate_this).read(), activate_locals)
# Check that the python version running this function
# is same as python version that created the virtualenv.
python_version = "python{}.{}".format(*sys.version_info[:2])
if not os.path.exists(os.path.join(venv, 'lib', python_version)):
raise RuntimeError(venv + " was not set up for this Python version")
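# Illustrative sketch (not part of the original file): production scripts
# typically call setup_path() before importing anything installed in the
# virtualenv. The import path below mirrors this file's location but is an
# assumption for the example.
#
#   from scripts.lib.setup_path import setup_path
#   setup_path()
#   import django  # now resolved from zulip-py3-venv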
| {
"content_hash": "d6edb5c47ab7c1b760af5ec62a1c114e",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 95,
"avg_line_length": 45.8421052631579,
"alnum_prop": 0.6498277841561424,
"repo_name": "brainwane/zulip",
"id": "20a190242aa2c9002b3a22eb72f7d4cb971bbdf8",
"size": "871",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "scripts/lib/setup_path.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "423578"
},
{
"name": "Emacs Lisp",
"bytes": "158"
},
{
"name": "HTML",
"bytes": "647926"
},
{
"name": "JavaScript",
"bytes": "2886792"
},
{
"name": "Pascal",
"bytes": "1113"
},
{
"name": "Perl",
"bytes": "398747"
},
{
"name": "Puppet",
"bytes": "90558"
},
{
"name": "Python",
"bytes": "6000548"
},
{
"name": "Ruby",
"bytes": "249744"
},
{
"name": "Shell",
"bytes": "110849"
},
{
"name": "TypeScript",
"bytes": "9543"
}
],
"symlink_target": ""
} |
"""Tests for the Switch as X Siren platform."""
from homeassistant.components.siren import DOMAIN as SIREN_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.components.switch_as_x.const import CONF_TARGET_DOMAIN, DOMAIN
from homeassistant.const import (
CONF_ENTITY_ID,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
Platform,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry
async def test_default_state(hass: HomeAssistant) -> None:
"""Test siren switch default state."""
config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: "switch.test",
CONF_TARGET_DOMAIN: Platform.SIREN,
},
title="Noise Maker",
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("siren.noise_maker")
assert state is not None
assert state.state == "unavailable"
assert state.attributes["supported_features"] == 3
async def test_service_calls(hass: HomeAssistant) -> None:
"""Test service calls affecting the switch as siren entity."""
await async_setup_component(hass, "switch", {"switch": [{"platform": "demo"}]})
config_entry = MockConfigEntry(
data={},
domain=DOMAIN,
options={
CONF_ENTITY_ID: "switch.decorative_lights",
CONF_TARGET_DOMAIN: Platform.SIREN,
},
title="noise_maker",
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get("siren.noise_maker").state == STATE_ON
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TOGGLE,
{CONF_ENTITY_ID: "siren.noise_maker"},
blocking=True,
)
assert hass.states.get("switch.decorative_lights").state == STATE_OFF
assert hass.states.get("siren.noise_maker").state == STATE_OFF
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TURN_ON,
{CONF_ENTITY_ID: "siren.noise_maker"},
blocking=True,
)
assert hass.states.get("switch.decorative_lights").state == STATE_ON
assert hass.states.get("siren.noise_maker").state == STATE_ON
await hass.services.async_call(
SIREN_DOMAIN,
SERVICE_TURN_OFF,
{CONF_ENTITY_ID: "siren.noise_maker"},
blocking=True,
)
assert hass.states.get("switch.decorative_lights").state == STATE_OFF
assert hass.states.get("siren.noise_maker").state == STATE_OFF
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_ON,
{CONF_ENTITY_ID: "switch.decorative_lights"},
blocking=True,
)
assert hass.states.get("switch.decorative_lights").state == STATE_ON
assert hass.states.get("siren.noise_maker").state == STATE_ON
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TURN_OFF,
{CONF_ENTITY_ID: "switch.decorative_lights"},
blocking=True,
)
assert hass.states.get("switch.decorative_lights").state == STATE_OFF
assert hass.states.get("siren.noise_maker").state == STATE_OFF
await hass.services.async_call(
SWITCH_DOMAIN,
SERVICE_TOGGLE,
{CONF_ENTITY_ID: "switch.decorative_lights"},
blocking=True,
)
assert hass.states.get("switch.decorative_lights").state == STATE_ON
assert hass.states.get("siren.noise_maker").state == STATE_ON
| {
"content_hash": "d9f2b7704eb00beb6e7c0de46f051129",
"timestamp": "",
"source": "github",
"line_count": 117,
"max_line_length": 83,
"avg_line_length": 31.803418803418804,
"alnum_prop": 0.6560064498790648,
"repo_name": "mezz64/home-assistant",
"id": "2b3dedf6fb883b5ab7e6379a91ec9fd687f0d82a",
"size": "3721",
"binary": false,
"copies": "4",
"ref": "refs/heads/dev",
"path": "tests/components/switch_as_x/test_siren.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2963"
},
{
"name": "PLSQL",
"bytes": "840"
},
{
"name": "Python",
"bytes": "52481895"
},
{
"name": "Shell",
"bytes": "6252"
}
],
"symlink_target": ""
} |
from .flaskext import FlaskJSONAPI
from .serializer import (JSONAPI, AttributeActions, RelationshipActions,
Permissions, attr_descriptor, relationship_descriptor,
permission_test)
| {
"content_hash": "f479632621a83a80c29095b605b89236",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 79,
"avg_line_length": 57.5,
"alnum_prop": 0.6652173913043479,
"repo_name": "angelosarto/sqlalchemy-jsonapi",
"id": "fcc0dcaa06ef25b48d5d1b4307e21fe56fe370ca",
"size": "230",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sqlalchemy_jsonapi/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "91323"
}
],
"symlink_target": ""
} |
"""Test BIP65 (CHECKLOCKTIMEVERIFY).
Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
1351.
"""
from test_framework.blocktools import (
create_block,
create_coinbase,
)
from test_framework.messages import (
CTransaction,
msg_block,
)
from test_framework.p2p import P2PInterface
from test_framework.script import (
CScript,
CScriptNum,
OP_1NEGATE,
OP_CHECKLOCKTIMEVERIFY,
OP_DROP,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
from test_framework.wallet import (
MiniWallet,
MiniWalletMode,
)
CLTV_HEIGHT = 1351
# Helper function to modify a transaction by
# 1) prepending a given script to the scriptSig of vin 0 and
# 2) (optionally) modify the nSequence of vin 0 and the tx's nLockTime
def cltv_modify_tx(tx, prepend_scriptsig, nsequence=None, nlocktime=None):
assert_equal(len(tx.vin), 1)
if nsequence is not None:
tx.vin[0].nSequence = nsequence
tx.nLockTime = nlocktime
tx.vin[0].scriptSig = CScript(prepend_scriptsig + list(CScript(tx.vin[0].scriptSig)))
tx.rehash()
def cltv_invalidate(tx, failure_reason):
# Modify the signature in vin 0 and nSequence/nLockTime of the tx to fail CLTV
#
    # According to BIP65, OP_CHECKLOCKTIMEVERIFY can fail due to the following reasons:
# 1) the stack is empty
# 2) the top item on the stack is less than 0
# 3) the lock-time type (height vs. timestamp) of the top stack item and the
# nLockTime field are not the same
# 4) the top stack item is greater than the transaction's nLockTime field
# 5) the nSequence field of the txin is 0xffffffff
assert failure_reason in range(5)
scheme = [
# | Script to prepend to scriptSig | nSequence | nLockTime |
# +-------------------------------------------------+------------+--------------+
[[OP_CHECKLOCKTIMEVERIFY], None, None],
[[OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP], None, None],
[[CScriptNum(1000), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, 1296688602], # timestamp of genesis block
[[CScriptNum(1000), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, 500],
[[CScriptNum(500), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0xffffffff, 500],
][failure_reason]
cltv_modify_tx(tx, prepend_scriptsig=scheme[0], nsequence=scheme[1], nlocktime=scheme[2])
def cltv_validate(tx, height):
# Modify the signature in vin 0 and nSequence/nLockTime of the tx to pass CLTV
scheme = [[CScriptNum(height), OP_CHECKLOCKTIMEVERIFY, OP_DROP], 0, height]
cltv_modify_tx(tx, prepend_scriptsig=scheme[0], nsequence=scheme[1], nlocktime=scheme[2])
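# Illustrative sketch (not part of the original test): how the two helpers
# above are combined with a wallet transaction. `wallet` and `node` are
# assumptions here; in the test below they come from MiniWallet and
# self.nodes[0].
#
#   spendtx = wallet.create_self_transfer(from_node=node)['tx']
#   cltv_invalidate(spendtx, 3)     # reason 4 above: stack item > nLockTime
#   cltv_validate(spendtx, height)  # rewrite scriptSig/nLockTime so CLTV passes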
class BIP65Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'[email protected]',
'-par=1', # Use only one script thread to get the exact reject reason for testing
'-acceptnonstdtxn=1', # cltv_invalidate is nonstandard
]]
self.setup_clean_chain = True
self.rpc_timeout = 480
def test_cltv_info(self, *, is_active):
assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip65'], {
"active": is_active,
"height": CLTV_HEIGHT,
"type": "buried",
},
)
def run_test(self):
peer = self.nodes[0].add_p2p_connection(P2PInterface())
wallet = MiniWallet(self.nodes[0], mode=MiniWalletMode.RAW_OP_TRUE)
self.test_cltv_info(is_active=False)
self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
wallet.generate(10)
self.nodes[0].generate(CLTV_HEIGHT - 2 - 10)
self.log.info("Test that invalid-according-to-CLTV transactions can still appear in a block")
# create one invalid tx per CLTV failure reason (5 in total) and collect them
invalid_cltv_txs = []
for i in range(5):
spendtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
cltv_invalidate(spendtx, i)
invalid_cltv_txs.append(spendtx)
tip = self.nodes[0].getbestblockhash()
block_time = self.nodes[0].getblockheader(tip)['mediantime'] + 1
block = create_block(int(tip, 16), create_coinbase(CLTV_HEIGHT - 1), block_time)
block.nVersion = 3
block.vtx.extend(invalid_cltv_txs)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.test_cltv_info(is_active=False) # Not active as of current tip and next block does not need to obey rules
peer.send_and_ping(msg_block(block))
self.test_cltv_info(is_active=True) # Not active as of current tip, but next block must obey rules
assert_equal(self.nodes[0].getbestblockhash(), block.hash)
self.log.info("Test that blocks must now be at least version 4")
tip = block.sha256
block_time += 1
block = create_block(tip, create_coinbase(CLTV_HEIGHT), block_time)
block.nVersion = 3
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['{}, bad-version(0x00000003)'.format(block.hash)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that invalid-according-to-CLTV transactions cannot appear in a block")
block.nVersion = 4
block.vtx.append(CTransaction()) # dummy tx after coinbase that will be replaced later
# create and test one invalid tx per CLTV failure reason (5 in total)
for i in range(5):
spendtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
cltv_invalidate(spendtx, i)
expected_cltv_reject_reason = [
"non-mandatory-script-verify-flag (Operation not valid with the current stack size)",
"non-mandatory-script-verify-flag (Negative locktime)",
"non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
"non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
"non-mandatory-script-verify-flag (Locktime requirement not satisfied)",
][i]
# First we show that this tx is valid except for CLTV by getting it
# rejected from the mempool for exactly that reason.
assert_equal(
[{
'txid': spendtx.hash,
'wtxid': spendtx.getwtxid(),
'allowed': False,
'reject-reason': expected_cltv_reject_reason,
}],
self.nodes[0].testmempoolaccept(rawtxs=[spendtx.serialize().hex()], maxfeerate=0),
)
# Now we verify that a block with this transaction is also invalid.
block.vtx[1] = spendtx
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
with self.nodes[0].assert_debug_log(expected_msgs=['CheckInputScripts on {} failed with {}'.format(
block.vtx[-1].hash, expected_cltv_reject_reason)]):
peer.send_and_ping(msg_block(block))
assert_equal(int(self.nodes[0].getbestblockhash(), 16), tip)
peer.sync_with_ping()
self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
cltv_validate(spendtx, CLTV_HEIGHT - 1)
block.vtx.pop(1)
block.vtx.append(spendtx)
block.hashMerkleRoot = block.calc_merkle_root()
block.solve()
self.test_cltv_info(is_active=True) # Not active as of current tip, but next block must obey rules
peer.send_and_ping(msg_block(block))
self.test_cltv_info(is_active=True) # Active as of current tip
assert_equal(int(self.nodes[0].getbestblockhash(), 16), block.sha256)
if __name__ == '__main__':
BIP65Test().main()
| {
"content_hash": "db7d83f0ef6f2471f061e51c5228a98f",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 119,
"avg_line_length": 41.95876288659794,
"alnum_prop": 0.6208845208845208,
"repo_name": "apoelstra/bitcoin",
"id": "10d2072dba01a071d0e861f401a0a60aa413a199",
"size": "8354",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "test/functional/feature_cltv.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "448326"
},
{
"name": "C++",
"bytes": "3551748"
},
{
"name": "CSS",
"bytes": "1127"
},
{
"name": "Groff",
"bytes": "19797"
},
{
"name": "HTML",
"bytes": "50621"
},
{
"name": "Java",
"bytes": "2100"
},
{
"name": "Makefile",
"bytes": "63714"
},
{
"name": "Objective-C",
"bytes": "2023"
},
{
"name": "Objective-C++",
"bytes": "7238"
},
{
"name": "Protocol Buffer",
"bytes": "2308"
},
{
"name": "Python",
"bytes": "220811"
},
{
"name": "QMake",
"bytes": "2019"
},
{
"name": "Shell",
"bytes": "37705"
}
],
"symlink_target": ""
} |
import threading
import sublime
class EnsimeEnvironment:
def __init__(self):
self.settings = sublime.load_settings("Ensime.sublime-settings")
self._clientLock = threading.RLock()
self._client = None
def set_client(self, client):
self._clientLock.acquire()
try:
self._client = client
return self._client
finally:
self._clientLock.release()
def client(self):
return self._client
ensime_env = EnsimeEnvironment()
| {
"content_hash": "781cbff259ac17d673a6a10e8c00723a",
"timestamp": "",
"source": "github",
"line_count": 24,
"max_line_length": 68,
"avg_line_length": 19.583333333333332,
"alnum_prop": 0.6723404255319149,
"repo_name": "michaelpnash/sublime-ensime",
"id": "e8fcc316c1a9903df6d6ad039f37df1451d37699",
"size": "470",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "ensime_environment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41357"
},
{
"name": "Shell",
"bytes": "466"
}
],
"symlink_target": ""
} |
__author__ = 'boris'
"""
takes:
all_counts - pickled dict of mutation counts, all_counts[rRNA_name][sample_name] = counts_table
all_Depths - pickled dict of coverage counts , all_depths[rRNA_name][sample_name] = depth_table
    min_mutations: if a position has fewer mutations than this in either dataset, the subtracted total will be set to zero, and the ratio to 1
output_prefix
comparisons: any number of sample,control name pairs
makes for each comparison_pair:
a dict of subtraction-normed mutation rates
a dict of variance for subtracted rates
a dict of division-normed mutation rates
a dict of variance for division-normed rates
"""
import sys, math, mod_utils
from collections import defaultdict
def write_out_counts(subtracted_rates, subtraction_errors, divided_rates, division_errors, rRNA, output_filename):
f = open(output_filename, 'w')
f.write('position\tsubtracted\tsub error\tdivided\tdiv error\n')
for position in sorted(subtracted_rates[rRNA]):
line = '%d\t%f\t%f\t%f\t%f\n' % (position, subtracted_rates[rRNA][position], subtraction_errors[rRNA][position], divided_rates[rRNA][position],division_errors[rRNA][position])
f.write(line)
f.close()
def standard_error(mutations, coverage):
return math.sqrt(float(mutations))/float(coverage)
def subtraction_norm(all_counts, all_depths, min_mutations, comparison):
sample = comparison[0]
control = comparison[1]
normalized_rates = defaultdict(dict)
normalized_errors = defaultdict(dict)
for rRNA_name in all_counts:
for position in all_counts[rRNA_name][sample]:
if all_counts[rRNA_name][sample][position] >= min_mutations and all_counts[rRNA_name][control][position] >= min_mutations:
sample_ratio = float(all_counts[rRNA_name][sample][position])/float(all_depths[rRNA_name][sample][position])
sample_error = standard_error(all_counts[rRNA_name][sample][position], all_depths[rRNA_name][sample][position])
control_ratio = float(all_counts[rRNA_name][control][position])/float(all_depths[rRNA_name][control][position])
control_error = standard_error(all_counts[rRNA_name][control][position], all_depths[rRNA_name][control][position])
normalized_rates[rRNA_name][position] = sample_ratio-control_ratio
normalized_errors[rRNA_name][position] = math.sqrt((sample_error**2)+(control_error**2))
else:
normalized_rates[rRNA_name][position] = 0
normalized_errors[rRNA_name][position] = 0
return normalized_rates, normalized_errors
def division_norm(all_counts, all_depths, min_mutations, comparison):
sample = comparison[0]
control = comparison[1]
normalized_rates = defaultdict(dict)
normalized_errors = defaultdict(dict)
for rRNA_name in all_counts:
for position in all_counts[rRNA_name][sample]:
if all_counts[rRNA_name][sample][position] >= min_mutations and all_counts[rRNA_name][control][position] >= min_mutations:
sample_ratio = float(all_counts[rRNA_name][sample][position])/float(all_depths[rRNA_name][sample][position])
sample_error = standard_error(all_counts[rRNA_name][sample][position], all_depths[rRNA_name][sample][position])
control_ratio = float(all_counts[rRNA_name][control][position])/float(all_depths[rRNA_name][control][position])
control_error = standard_error(all_counts[rRNA_name][control][position], all_depths[rRNA_name][control][position])
normalized_rates[rRNA_name][position] = math.log(sample_ratio/control_ratio, 2)
normalized_errors[rRNA_name][position] = ((sample_ratio/control_ratio)*math.sqrt((sample_error/sample_ratio)**2+(control_error/control_ratio)**2))/((sample_ratio/control_ratio)*math.log(2))
                # Standard error propagation: for r = sample_ratio/control_ratio,
                # sigma_r = r * sqrt((s_err/s)^2 + (c_err/c)^2); the error of
                # log2(r) is then sigma_r / (r * ln 2).
else:
normalized_rates[rRNA_name][position] = 0
normalized_errors[rRNA_name][position] = 0
return normalized_rates, normalized_errors
def main():
all_counts_file, all_depths_file, min_mutations, output_prefix = sys.argv[1:5]
min_mutations = int(min_mutations)
all_counts = mod_utils.unPickle(all_counts_file)
all_depths = mod_utils.unPickle(all_depths_file)
comparisons = (pair.split(',') for pair in sys.argv[5:])
for comparison in comparisons:
subtracted_rates, subtraction_errors = subtraction_norm(all_counts, all_depths, min_mutations, comparison)
divided_rates, division_errors = division_norm(all_counts, all_depths, min_mutations, comparison)
mod_utils.makePickle(subtracted_rates, '%s_%s_%s_sub_norm.pkl' % (output_prefix, comparison[0], comparison[1]))
mod_utils.makePickle(subtraction_errors, '%s_%s_%s_sub_err.pkl' % (output_prefix, comparison[0], comparison[1]))
mod_utils.makePickle(divided_rates, '%s_%s_%s_div_norm.pkl' % (output_prefix, comparison[0], comparison[1]))
mod_utils.makePickle(division_errors, '%s_%s_%s_div_err.pkl' % (output_prefix, comparison[0], comparison[1]))
for rRNA in subtracted_rates:
write_out_counts(subtracted_rates, subtraction_errors, divided_rates, division_errors, rRNA, '%s_%s_%s_%s.txt' % (output_prefix, comparison[0], comparison[1], rRNA))
main() | {
"content_hash": "f59bcb7e7466032b4633ff4f259b7430",
"timestamp": "",
"source": "github",
"line_count": 96,
"max_line_length": 205,
"avg_line_length": 56.510416666666664,
"alnum_prop": 0.6849769585253456,
"repo_name": "borisz264/mod_seq",
"id": "3021d7f2fc24e5e0ec06f7b6db0813aca0ea5826",
"size": "5425",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "unused_scripts/subtract_shapemapper_counts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "315510"
}
],
"symlink_target": ""
} |
"""Generates Json file for native library's resident pages."""
import argparse
import json
import logging
import os
import sys
_SRC_PATH = os.path.abspath(os.path.join(
os.path.dirname(__file__), os.pardir, os.pardir, os.pardir))
sys.path.insert(0, os.path.join(_SRC_PATH, 'third_party', 'catapult', 'devil'))
from devil.android import device_utils
from devil.android import device_errors
def _CreateArgumentParser():
parser = argparse.ArgumentParser(
description='Create JSON file for residency pages')
parser.add_argument('--device-serial', type=str, required=True)
parser.add_argument('--on-device-file-path', type=str,
help='Path to residency.txt', required=True)
parser.add_argument('--output-directory', type=str, help='Output directory',
required=False)
return parser
def _ReadFileFromDevice(device_serial, file_path):
"""Reads the file from the device, and returns its content.
Args:
device_serial: (str) Device identifier
file_path: (str) On-device path to the residency file.
Returns:
(str or None) The file content.
"""
content = None
try:
device = device_utils.DeviceUtils(device_serial)
content = device.ReadFile(file_path, True)
except device_errors.CommandFailedError:
logging.exception(
'Possible failure reaching the device or reading the file')
return content
def ParseResidentPages(resident_pages):
  """Parses the residency data and converts it into a list in which the index
  corresponds to the page number and the value is 1 if the page is resident
  and 0 otherwise.
|resident_pages| contains a string of resident pages:
0
1
...
...
N
Args:
resident_pages: (str) As returned by ReadFileFromDevice()
Returns:
(list) Pages list.
"""
pages_list = []
expected = 0
for page in resident_pages.splitlines():
while expected < int(page):
pages_list.append(0)
expected += 1
pages_list.append(1)
    expected += 1
return pages_list
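# Worked example (hypothetical input): ParseResidentPages("0\n2\n3") returns
# [1, 0, 1, 1] -- pages 0, 2 and 3 are resident, page 1 is not.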
def _GetResidentPagesJSON(pages_list):
  """Transforms the pages list into a JSON object.
Args:
pages_list: (list) As returned by ParseResidentPages()
Returns:
(JSON object) Pages JSON object.
"""
json_data = []
for i in range(len(pages_list)):
json_data.append({'page_num': i, 'resident': pages_list[i]})
return json_data
def _WriteJSONToFile(json_data, output_file_path):
"""Dumps JSON data to file.
Args:
json_data: (JSON object) Data to be dumped in the file.
output_file_path: (str) Output file path
"""
with open(output_file_path, 'w') as f:
json.dump(json_data, f)
def main():
parser = _CreateArgumentParser()
args = parser.parse_args()
logging.basicConfig(level=logging.INFO)
content = _ReadFileFromDevice(args.device_serial,
args.on_device_file_path)
if not content:
logging.error('Error reading file from device')
return 1
pages_list = ParseResidentPages(content)
pages_json = _GetResidentPagesJSON(pages_list)
_WriteJSONToFile(pages_json, os.path.join(args.output_directory,
'residency.json'))
return 0
if __name__ == '__main__':
sys.exit(main())
| {
"content_hash": "2727018e0b0f1a6249a3e6abb069034d",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 79,
"avg_line_length": 25.928,
"alnum_prop": 0.6670780623264425,
"repo_name": "scheib/chromium",
"id": "5f0fa9ec477363f4eee2179e6b75b0611c85f6a4",
"size": "3429",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "tools/android/native_lib_memory/extract_resident_pages.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""Implements ``ipython_display``, a function to embed images/videos/audio in the
IPython Notebook.
"""
# Notes:
# All media are physically embedded in the IPython Notebook
# (instead of simple links to the original files)
# That is because most browsers use a cache system and they won't
# properly refresh the media when the original files are changed.
import inspect
import os
from base64 import b64encode
from moviepy.audio.AudioClip import AudioClip
from moviepy.tools import extensions_dict
from moviepy.video.io.ffmpeg_reader import ffmpeg_parse_infos
from moviepy.video.VideoClip import ImageClip, VideoClip
try: # pragma: no cover
from IPython.display import HTML
ipython_available = True
class HTML2(HTML): # noqa D101
def __add__(self, other):
return HTML2(self.data + other.data)
except ImportError:
def HTML2(content): # noqa D103
return content
ipython_available = False
sorry = "Sorry, seems like your browser doesn't support HTML5 audio/video"
templates = {
"audio": (
"<audio controls>"
"<source %(options)s src='data:audio/%(ext)s;base64,%(data)s'>"
+ sorry
+ "</audio>"
),
"image": "<img %(options)s src='data:image/%(ext)s;base64,%(data)s'>",
"video": (
"<video %(options)s"
"src='data:video/%(ext)s;base64,%(data)s' controls>" + sorry + "</video>"
),
}
def html_embed(
clip, filetype=None, maxduration=60, rd_kwargs=None, center=True, **html_kwargs
):
"""Returns HTML5 code embedding the clip.
Parameters
----------
clip : moviepy.Clip.Clip
Either a file name, or a clip to preview.
Either an image, a sound or a video. Clips will actually be
written to a file and embedded as if a filename was provided.
filetype : str, optional
One of 'video','image','audio'. If None is given, it is determined
        based on the extension of ``filename``, but this detection can fail.
maxduration : float, optional
An error will be raised if the clip's duration is more than the indicated
value (in seconds), to avoid spoiling the browser's cache and the RAM.
rd_kwargs : dict, optional
Keyword arguments for the rendering, like ``dict(fps=15, bitrate="50k")``.
Allow you to give some options to the render process. You can, for
example, disable the logger bar passing ``dict(logger=None)``.
center : bool, optional
If true (default), the content will be wrapped in a
``<div align=middle>`` HTML container, so the content will be displayed
at the center.
html_kwargs
Allow you to give some options, like ``width=260``, ``autoplay=True``,
``loop=1`` etc.
Examples
--------
>>> from moviepy.editor import *
>>> # later ...
>>> html_embed(clip, width=360)
>>> html_embed(clip.audio)
>>> clip.write_gif("test.gif")
>>> html_embed('test.gif')
>>> clip.save_frame("first_frame.jpeg")
>>> html_embed("first_frame.jpeg")
"""
if rd_kwargs is None: # pragma: no cover
rd_kwargs = {}
if "Clip" in str(clip.__class__):
TEMP_PREFIX = "__temp__"
if isinstance(clip, ImageClip):
filename = TEMP_PREFIX + ".png"
kwargs = {"filename": filename, "with_mask": True}
argnames = inspect.getfullargspec(clip.save_frame).args
kwargs.update(
{key: value for key, value in rd_kwargs.items() if key in argnames}
)
clip.save_frame(**kwargs)
elif isinstance(clip, VideoClip):
filename = TEMP_PREFIX + ".mp4"
kwargs = {"filename": filename, "preset": "ultrafast"}
kwargs.update(rd_kwargs)
clip.write_videofile(**kwargs)
elif isinstance(clip, AudioClip):
filename = TEMP_PREFIX + ".mp3"
kwargs = {"filename": filename}
kwargs.update(rd_kwargs)
clip.write_audiofile(**kwargs)
else:
raise ValueError("Unknown class for the clip. Cannot embed and preview.")
return html_embed(
filename,
maxduration=maxduration,
rd_kwargs=rd_kwargs,
center=center,
**html_kwargs,
)
filename = clip
options = " ".join(["%s='%s'" % (str(k), str(v)) for k, v in html_kwargs.items()])
name, ext = os.path.splitext(filename)
ext = ext[1:]
if filetype is None:
ext = filename.split(".")[-1].lower()
if ext == "gif":
filetype = "image"
elif ext in extensions_dict:
filetype = extensions_dict[ext]["type"]
else:
raise ValueError(
"No file type is known for the provided file. Please provide "
"argument `filetype` (one of 'image', 'video', 'sound') to the "
"ipython display function."
)
if filetype == "video":
        # The next lines set the HTML5-compatible extension and check that the
# extension is HTML5-valid
exts_htmltype = {"mp4": "mp4", "webm": "webm", "ogv": "ogg"}
allowed_exts = " ".join(exts_htmltype.keys())
try:
ext = exts_htmltype[ext]
except Exception:
raise ValueError(
"This video extension cannot be displayed in the "
"IPython Notebook. Allowed extensions: " + allowed_exts
)
if filetype in ["audio", "video"]:
duration = ffmpeg_parse_infos(filename, decode_file=True)["duration"]
if duration > maxduration:
raise ValueError(
                (
                    "The duration of video %s (%.1f) exceeds the 'maxduration'"
                    " attribute. You can increase 'maxduration' by passing the"
                    " 'maxduration' parameter to the ipython_display function."
" But note that embedding large videos may take all the memory"
" away!"
)
% (filename, duration)
)
with open(filename, "rb") as file:
data = b64encode(file.read()).decode("utf-8")
template = templates[filetype]
result = template % {"data": data, "options": options, "ext": ext}
if center:
result = r"<div align=middle>%s</div>" % result
return result
def ipython_display(
clip,
filetype=None,
maxduration=60,
t=None,
fps=None,
rd_kwargs=None,
center=True,
**html_kwargs,
):
"""Displays clip content in an IPython Notebook.
Remarks: If your browser doesn't support HTML5, this should warn you.
If nothing is displayed, maybe your file or filename is wrong.
Important: The media will be physically embedded in the notebook.
Parameters
----------
clip : moviepy.Clip.Clip
Either the name of a file, or a clip to preview. The clip will actually
be written to a file and embedded as if a filename was provided.
filetype : str, optional
One of ``"video"``, ``"image"`` or ``"audio"``. If None is given, it is
        determined based on the extension of ``filename``, but this detection can fail.
maxduration : float, optional
An error will be raised if the clip's duration is more than the indicated
value (in seconds), to avoid spoiling the browser's cache and the RAM.
t : float, optional
If not None, only the frame at time t will be displayed in the notebook,
instead of a video of the clip.
fps : int, optional
Enables to specify an fps, as required for clips whose fps is unknown.
rd_kwargs : dict, optional
Keyword arguments for the rendering, like ``dict(fps=15, bitrate="50k")``.
Allow you to give some options to the render process. You can, for
example, disable the logger bar passing ``dict(logger=None)``.
center : bool, optional
If true (default), the content will be wrapped in a
``<div align=middle>`` HTML container, so the content will be displayed
at the center.
kwargs
Allow you to give some options, like ``width=260``, etc. When editing
looping gifs, a good choice is ``loop=1, autoplay=1``.
Examples
--------
>>> from moviepy.editor import *
>>> # later ...
>>> clip.ipython_display(width=360)
>>> clip.audio.ipython_display()
>>> clip.write_gif("test.gif")
>>> ipython_display('test.gif')
>>> clip.save_frame("first_frame.jpeg")
>>> ipython_display("first_frame.jpeg")
"""
if not ipython_available:
raise ImportError("Only works inside an IPython Notebook")
if rd_kwargs is None:
rd_kwargs = {}
if fps is not None:
rd_kwargs["fps"] = fps
if t is not None:
clip = clip.to_ImageClip(t)
return HTML2(
html_embed(
clip,
filetype=filetype,
maxduration=maxduration,
center=center,
rd_kwargs=rd_kwargs,
**html_kwargs,
)
)
| {
"content_hash": "51a2c3cb6ad3890b0d88a93c68b50fc6",
"timestamp": "",
"source": "github",
"line_count": 281,
"max_line_length": 86,
"avg_line_length": 32.096085409252666,
"alnum_prop": 0.5982924936245704,
"repo_name": "Zulko/moviepy",
"id": "d18b6a4e040c7642105e9ff9fe01cb98b5837752",
"size": "9019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moviepy/video/io/html_tools.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "622"
},
{
"name": "Python",
"bytes": "536587"
}
],
"symlink_target": ""
} |
from unittest import TestCase
from redis import StrictRedis
import os
from deferrable.backend.dockets import DocketsBackendFactory
from deferrable.queue.dockets import DocketsQueue, DocketsErrorQueue
class TestDocketsQueue(TestCase):
def setUp(self):
self.redis_client = StrictRedis(host=os.getenv("DEFERRABLE_TEST_REDIS_HOST","redis"))
self.factory = DocketsBackendFactory(self.redis_client)
self.backend = self.factory.create_backend_for_group('test')
self.queue = self.backend.queue
def test_make_error_queue(self):
error_queue = self.queue.make_error_queue()
self.assertIsInstance(error_queue, DocketsErrorQueue)
class TestDocketsErrorQueue(TestCase):
pass
| {
"content_hash": "41b62bf099a22f3926b436b3c044a4b2",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 93,
"avg_line_length": 36.25,
"alnum_prop": 0.7517241379310344,
"repo_name": "gamechanger/deferrable",
"id": "c44367a6b1c45b862ef6977110967e2ce4cb5b58",
"size": "725",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/queue/dockets_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Lua",
"bytes": "423"
},
{
"name": "Makefile",
"bytes": "297"
},
{
"name": "Python",
"bytes": "75588"
}
],
"symlink_target": ""
} |
from flask import Blueprint, render_template, abort, request, redirect, session, url_for
from flask.ext.login import current_user, login_user
from sqlalchemy import desc
from shutil import move
from datetime import datetime
from packages.objects import *
from packages.common import *
from packages.config import _cfg
from packages.kpack import PackageInfo
from packages.email import send_new_pacakge_email
import os
import binascii
import zipfile
import urllib
import tempfile
api = Blueprint('api', __name__)
@api.route("/api/v1/login", methods=['POST'])
@json_output
def login():
username = request.form['username']
password = request.form['password']
user = User.query.filter(User.username.ilike(username)).first()
if not user:
return { 'success': False, 'error': 'Your username or password is incorrect.' }
if user.confirmation != '' and user.confirmation != None:
return { 'success': False, 'error': 'Your account is pending. Check your email or contact [email protected]' }
if not bcrypt.checkpw(password, user.password):
return { 'success': False, 'error': 'Your username or password is incorrect.' }
login_user(user)
return { 'success': True }
@api.route("/api/v1/<repo>/<name>")
@json_output
def get_info(repo, name):
package = Package.query.filter(Package.name == name).filter(Package.repo == repo).first()
if not package:
return { 'success': False, 'error': 'Package not found.' }, 404
json = {
'name': package.name,
'repo': package.repo,
'full_name': '{0}/{1}'.format(package.repo, package.name),
'version': package.version,
'author': package.author,
'maintainer': package.maintainer,
'infourl': package.infourl,
'copyright': package.copyright,
'capabilities': package.capabilities,
'dependencies': list(),
'approved': package.approved
}
for d in package.dependencies:
json['dependencies'].append('{0}/{1}'.format(d.repo, d.name))
return json
@api.route("/api/v1/<repo>/<name>/approve", methods=["POST"])
@json_output
def approve_package(repo, name):
package = Package.query.filter(Package.name == name).filter(Package.repo == repo).first()
if not package:
return { 'success': False, 'error': 'Package not found.' }, 404
if not current_user or not current_user.admin:
return { 'success': False, 'error': 'You do not have permission to approve this package.' }, 403
package.approved = True
db.commit()
return { 'success': True }
@api.route("/api/v1/<repo>/<name>/unapprove", methods=["POST"])
@json_output
def unapprove_package(repo, name):
package = Package.query.filter(Package.name == name).filter(Package.repo == repo).first()
if not package:
return { 'success': False, 'error': 'Package not found.' }, 404
if not current_user or not current_user.admin:
return { 'success': False, 'error': 'You do not have permission to unapprove this package.' }, 403
package.approved = False
db.commit()
return { 'success': True }
@api.route("/api/v1/user/<username>/setadmin", methods=["POST"])
@json_output
def set_admin(username):
user = User.query.filter(User.username == username).first()
if not user:
return { 'success': False, 'error': 'User not found.' }, 404
if not current_user or not current_user.admin:
return { 'success': False, 'error': 'You do not have permission to set admins.' }, 403
if current_user.username == user.username:
return { 'success': False, 'error': 'You cannot change your own admin privileges.' }, 403
user.admin = True
db.commit()
return { 'success': True }
@api.route("/api/v1/user/<username>/removeadmin", methods=["POST"])
@json_output
def remove_admin(username):
user = User.query.filter(User.username == username).first()
if not user:
return { 'success': False, 'error': 'User not found.' }, 404
if not current_user or not current_user.admin:
return { 'success': False, 'error': 'You do not have permission to remove admins.' }, 403
if current_user.username == user.username:
return { 'success': False, 'error': 'You cannot change your own admin privileges.' }, 403
user.admin = False
db.commit()
return { 'success': True }
@api.route("/api/v1/user/<username>/confirm/<confirmation>", methods=["POST"])
@json_output
def confirm_user(username, confirmation):
user = User.query.filter(User.username == username).first()
if not user:
return { 'success': False, 'error': 'User not found.' }, 404
if (not current_user or not current_user.admin) and (confirmation != user.confirmation):
return { 'success': False, 'error': 'You do not have permission to confirm this user.' }, 403
user.confirmation = None
db.commit()
return { 'success': True }
@api.route("/api/v1/user/<username>/unconfirm", methods=["POST"])
@json_output
def unconfirm_user(username):
user = User.query.filter(User.username == username).first()
if not user:
return { 'success': False, 'error': 'User not found.' }, 404
if not current_user or not current_user.admin:
return { 'success': False, 'error': 'You do not have permission to unconfirm this user.' }, 403
if current_user.username == user.username:
return { 'success': False, 'error': 'You cannot unconfirm your own account.' }, 403
user.confirmation = binascii.b2a_hex(os.urandom(20)).decode("utf-8")
db.commit()
return { 'success': True }
@api.route("/api/v1/<repo>/<name>/remove", methods=["POST"])
@json_output
def remove_package(repo, name):
package = Package.query.filter(Package.name == name).filter(Package.repo == repo).first()
if not package:
return { 'success': False, 'error': 'Package not found.' }, 404
if not current_user or not current_user.admin:
return { 'success': False, 'error': 'You do not have permission to remove this package.' }, 403
packagePath = os.path.join(_cfg("storage"), package.repo, "{0}-{1}.pkg".format(package.name, package.version))
os.remove(packagePath)
db.delete(package)
db.commit()
return { 'success': True }
@api.route("/api/v1/transfer/<repo>/<name>/<username>", methods=["POST"])
@json_output
def transfer_package(repo, name, username):
package = Package.query.filter(Package.name == name).filter(Package.repo == repo).first()
if not package:
return { 'success': False, 'error': 'Package not found.' }, 404
if not current_user == package.user and not current_user.admin:
return { 'success': False, 'error': 'You do not have permission to move this package.' }, 403
new_user = User.query.filter(User.username == username).first()
if not new_user:
return { 'success': False, 'error': 'User not found' }, 404
package.user = new_user
db.commit()
return { 'success': True }
@api.route("/api/v1/upload", methods=['POST'])
@json_output
@loginrequired
def upload_package():
package_file = request.files.get('package')
if not package_file:
return { 'success': False, 'error': 'You must include a package file.' }
f, path = tempfile.mkstemp()
package_file.save(path)
info = None
try:
info = PackageInfo.read_package(path)
if info.repo == None or info.name == None or info.version == None:
return { 'success': False, 'error': 'This is not a valid KnightOS package.' }, 400
if not info.repo in ['core', 'extra', 'community', 'ports', 'nonfree']:
return { 'success': False, 'error': '{0} is not an acceptable package repository.'.format(info.repo) }, 400
if '/' in info.name:
return { 'success': False, 'error': '{0} is not an acceptable package name.'.format(info.name) }, 400
except:
return { 'success': False, 'error': 'This is not a valid KnightOS package.' }, 400
package = Package()
existing = Package.query.filter(Package.name == info.name).first()
if existing:
if existing.repo != info.repo:
return { 'success': False, 'error': 'This name conflicts with {0}/{1}.'.format(existing.repo, existing.name) }, 403
if existing.user.username != current_user.username and not current_user.admin:
return { 'success': False, 'error': 'You do not have permission to update {0}/{1}.'.format(existing.repo, existing.name) }, 403
package = existing
package.updated = datetime.now()
else:
package.user = current_user
package.name = info.name
package.repo = info.repo
package.approved = False
package.version = '{0}.{1}.{2}'.format(info.version[0], info.version[1], info.version[2])
package.description = info.description
package.author = info.author
package.maintainer = info.maintainer
package.infourl = info.infourl
package.copyright = info.copyright
package.capabilities = ' '.join(info.capabilities)
package.contents = None
package.dependencies = list()
for dep in info.dependencies:
try:
repo = dep.split('/')[0]
name = dep.split('/')[1]
db_dep = Package.query.filter(Package.repo == repo).filter(Package.name == name).first()
if not db_dep:
raise Exception()
package.dependencies.append(db_dep)
print('appended ' + db_dep.name)
except:
return { 'success': False, 'error': '{0} is not a known dependency. Did you upload it first?'.format(dep) }, 400
storage_dir = os.path.join(_cfg("storage"), package.repo)
if not os.path.exists(storage_dir):
os.makedirs(storage_dir)
final_path = os.path.join(storage_dir, "{0}-{1}.pkg".format(package.name, package.version))
move(path, final_path)
if not existing:
db.add(package)
send_new_pacakge_email(package)
db.commit()
return { 'success': True, 'url': '/{0}/{1}'.format(package.repo, package.name) }, 200
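# Example client call against this endpoint (hypothetical host; requires an
# authenticated session cookie):
#   curl -b cookies.txt -F package=@hello-1.0.0.pkg https://packages.example.org/api/v1/upload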
| {
"content_hash": "599ac0be9c9edc60d0823f28cb55e887",
"timestamp": "",
"source": "github",
"line_count": 231,
"max_line_length": 139,
"avg_line_length": 43.48051948051948,
"alnum_prop": 0.6443647949024293,
"repo_name": "MaxLeiter/packages.knightos.org",
"id": "df16944c543a42f63fc4fc609c08ef87354d34db",
"size": "10044",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "packages/blueprints/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1134"
},
{
"name": "HTML",
"bytes": "32088"
},
{
"name": "JavaScript",
"bytes": "2228"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "44755"
}
],
"symlink_target": ""
} |
from npr_sfs.datasets.loader import dataNames, loadData
import results.ibme
import results.lumo
import results.compare
from npr_sfs.results.results import batchResults
batch_modules = [results.ibme,
results.lumo,
results.compare]
if __name__ == '__main__':
for batch_module in batch_modules:
batchResults(batch_module.batch_func, batch_module.batch_name)
| {
"content_hash": "a2008d15d31413528b5d8e43773b3048",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 70,
"avg_line_length": 25.375,
"alnum_prop": 0.6921182266009852,
"repo_name": "tody411/NPR-SFS",
"id": "41e7cfcd8cb839b3fd9933b8d26eb834d890e2bb",
"size": "525",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "npr_sfs/main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "27977"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/intangible/ship/shared_tieadvanced_pcd.iff"
result.attribute_template_id = 8
result.stfName("space_item_n","tieadvanced_pcd")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "339b757e674159b3fe063258cb3a1794",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 70,
"avg_line_length": 23.846153846153847,
"alnum_prop": 0.7032258064516129,
"repo_name": "obi-two/Rebelion",
"id": "19ca6d9bc151f3fd73e6d44c2232285c6e8674f0",
"size": "455",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "data/scripts/templates/object/intangible/ship/shared_tieadvanced_pcd.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11818"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2293610"
},
{
"name": "CMake",
"bytes": "39727"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7499185"
},
{
"name": "SQLPL",
"bytes": "41864"
}
],
"symlink_target": ""
} |
from jinja2 import Environment
from jinja2 import FileSystemLoader
from .base import BaseView
class JinjaView(BaseView):
"""
Jinja view.
"""
def _load_engine(self, template_dir):
"""
Load template engine by name and return an instance.
"""
return Environment(loader=FileSystemLoader(template_dir))
def render(self, template_name, variables=None):
"""
Render a template with the passed variables.
"""
if variables is None:
variables = {}
template = self._engine.get_template(template_name)
return template.render(**variables)
def render_source(self, source, variables=None):
"""
Render a source with the passed variables.
"""
if variables is None:
variables = {}
template = self._engine.from_string(source)
return template.render(**variables)
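# Usage sketch (assuming BaseView passes `template_dir` through to _load_engine):
#   view = JinjaView(template_dir='templates')
#   html = view.render('index.html', {'title': 'Hello'})
#   text = view.render_source('Hi {{ name }}', {'name': 'Rails'})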
| {
"content_hash": "635d53d1bf897a945e12d0b45ace4d8e",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 65,
"avg_line_length": 27.96969696969697,
"alnum_prop": 0.6132177681473456,
"repo_name": "PythonRails/rails",
"id": "25123de2baaf203e6e21c5e3f06659e764188d36",
"size": "923",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "rails/views/jinja.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "12835"
}
],
"symlink_target": ""
} |
from collections import OrderedDict
from django.db.models import Q
from django.utils.functional import cached_property
from pontoon.base.models import (
Locale,
Project,
Resource,
TranslatedResource,
Translation,
)
from pontoon.tags.models import Tag
class Clonable(object):
"""Instantiated descendants of this class can be called to create a cloned
version of the object.
The clone will be called with attributes listed in `self.clone_kwargs` as
kwargs. These can be overridden when creating the clone.
"""
clone_kwargs = ()
def __init__(self, **kwargs):
for k in self.clone_kwargs:
setattr(self, k, kwargs.get(k))
def __call__(self, **kwargs):
clone_kwargs = dict((k, getattr(self, k)) for k in self.clone_kwargs)
clone_kwargs.update(kwargs)
return self.__class__(**clone_kwargs)
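# For example (hypothetical argument values): given tool = TagsTRTool(locales=locs),
# calling tool(projects=projs) returns a new TagsTRTool that keeps `locales`
# from the original and overrides only `projects`.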
class FilteredDataTool(Clonable):
"""Base Tool for constructing and coalescing aggregating querysets
Descendants of this class will filter a queryset by mapping
self.filter_methods to methods on the class
The data is aggregated and then cached/coalesced to the
data property
It can be cloned to override filtering params
"""
default_groupby = ()
_default_annotations = ()
def __iter__(self):
return iter(self.data)
def __len__(self):
return len(self.data)
@property
def data_manager(self):
"""Entry table through which the query is constructed"""
raise NotImplementedError()
@property
def default_annotations(self):
return OrderedDict(self._default_annotations)
@property
def filtered_data(self):
"""Queryset after applying filter methods"""
data = self.data_manager.all()
for tag_filter in self.filters:
data = tag_filter(data)
return data
@property
def filters(self):
return [getattr(self, "filter_%s" % f) for f in self.filter_methods]
@cached_property
def data(self):
"""Cached and coalesed copy from get_data result"""
return self.coalesce(self.get_data())
def coalesce(self, data):
"""Coalesce the queryset to python data"""
return data
def get_annotations(self):
"""Fields to aggregate"""
anno = self.default_annotations.copy()
anno.update(self.annotations or {})
return anno
def get_data(self):
"""Get the aggregated queryset"""
return self.filtered_data.values(*self.get_groupby()).annotate(
**self.get_annotations()
)
def get_groupby(self):
"""Get groupby fields"""
return self.groupby and [self.groupby] or self.default_groupby
class TagsDataTool(FilteredDataTool):
"""Base Data Tool for retrieving Tag data
This class has the various Pontoon object managers as properties, which
allows the managers to be overridden (theoretically) in a descendant class
"""
_default_annotations = ()
default_groupby = ("resource__tag",)
filter_methods = ("tag", "locales", "projects")
clone_kwargs = ("locales", "projects", "priority", "slug", "path")
@property
def locale_manager(self):
return Locale.objects
@property
def project_manager(self):
return Project.objects
@property
def resource_manager(self):
return Resource.objects
@property
def tag_manager(self):
return Tag.objects
@property
def translation_manager(self):
return Translation.objects
@property
def tr_manager(self):
return TranslatedResource.objects
class TagsTRTool(TagsDataTool):
"""Data Tool from the perspective of TranslatedResources
"""
clone_kwargs = TagsDataTool.clone_kwargs + ("annotations", "groupby")
@property
def data_manager(self):
return self.tr_manager
def filter_locales(self, trs):
return trs.filter(locale__in=self.locales) if self.locales else trs
def filter_path(self, trs):
return (
trs.filter(resource__path__contains=self.path).distinct()
if self.path
else trs
)
def filter_projects(self, trs):
return trs.filter(resource__project__in=self.projects) if self.projects else trs
def filter_tag(self, trs):
"""Filters on tag.slug and tag.priority
"""
q = Q()
if not self.slug:
# if slug is not specified, then just remove all resources
# that have no tag
q &= ~Q(resource__tag__isnull=True)
if self.slug:
q &= Q(resource__tag__slug__contains=self.slug)
if self.priority is not None:
if self.priority is False:
# if priority is False, exclude tags with priority
q &= Q(resource__tag__priority__isnull=True)
elif self.priority is True:
# if priority is True show only tags with priority
q &= Q(resource__tag__priority__isnull=False)
elif isinstance(self.priority, int):
# if priority is an int, filter on that priority
q &= Q(resource__tag__priority=self.priority)
return trs.filter(q)
| {
"content_hash": "80442b2376290bfb76a51966532b2eb7",
"timestamp": "",
"source": "github",
"line_count": 188,
"max_line_length": 88,
"avg_line_length": 28.074468085106382,
"alnum_prop": 0.6271314892004547,
"repo_name": "jotes/pontoon",
"id": "166059b06d49bb8b8b50b3cd88fe056310f852e9",
"size": "5278",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pontoon/tags/utils/base.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226580"
},
{
"name": "Dockerfile",
"bytes": "2640"
},
{
"name": "FreeMarker",
"bytes": "35248"
},
{
"name": "HTML",
"bytes": "151639"
},
{
"name": "JavaScript",
"bytes": "1332848"
},
{
"name": "Makefile",
"bytes": "3551"
},
{
"name": "Python",
"bytes": "1391398"
},
{
"name": "Shell",
"bytes": "3676"
}
],
"symlink_target": ""
} |
import warnings
from http import HTTPStatus
from typing import Any, Iterable, Optional, Set, Tuple
from multidict import CIMultiDict
from yarl import URL
from . import hdrs
from .helpers import CookieMixin
from .typedefs import LooseHeaders, StrOrURL
__all__ = (
"HTTPException",
"HTTPError",
"HTTPRedirection",
"HTTPSuccessful",
"HTTPOk",
"HTTPCreated",
"HTTPAccepted",
"HTTPNonAuthoritativeInformation",
"HTTPNoContent",
"HTTPResetContent",
"HTTPPartialContent",
"HTTPMove",
"HTTPMultipleChoices",
"HTTPMovedPermanently",
"HTTPFound",
"HTTPSeeOther",
"HTTPNotModified",
"HTTPUseProxy",
"HTTPTemporaryRedirect",
"HTTPPermanentRedirect",
"HTTPClientError",
"HTTPBadRequest",
"HTTPUnauthorized",
"HTTPPaymentRequired",
"HTTPForbidden",
"HTTPNotFound",
"HTTPMethodNotAllowed",
"HTTPNotAcceptable",
"HTTPProxyAuthenticationRequired",
"HTTPRequestTimeout",
"HTTPConflict",
"HTTPGone",
"HTTPLengthRequired",
"HTTPPreconditionFailed",
"HTTPRequestEntityTooLarge",
"HTTPRequestURITooLong",
"HTTPUnsupportedMediaType",
"HTTPRequestRangeNotSatisfiable",
"HTTPExpectationFailed",
"HTTPMisdirectedRequest",
"HTTPUnprocessableEntity",
"HTTPFailedDependency",
"HTTPUpgradeRequired",
"HTTPPreconditionRequired",
"HTTPTooManyRequests",
"HTTPRequestHeaderFieldsTooLarge",
"HTTPUnavailableForLegalReasons",
"HTTPServerError",
"HTTPInternalServerError",
"HTTPNotImplemented",
"HTTPBadGateway",
"HTTPServiceUnavailable",
"HTTPGatewayTimeout",
"HTTPVersionNotSupported",
"HTTPVariantAlsoNegotiates",
"HTTPInsufficientStorage",
"HTTPNotExtended",
"HTTPNetworkAuthenticationRequired",
)
############################################################
# HTTP Exceptions
############################################################
class HTTPException(CookieMixin, Exception):
# You should set in subclasses:
# status = 200
status_code = -1
empty_body = False
default_reason = "" # Initialized at the end of the module
def __init__(
self,
*,
headers: Optional[LooseHeaders] = None,
reason: Optional[str] = None,
text: Optional[str] = None,
content_type: Optional[str] = None,
) -> None:
super().__init__()
if reason is None:
reason = self.default_reason
if text is None:
if not self.empty_body:
text = f"{self.status_code}: {reason}"
else:
if self.empty_body:
warnings.warn(
"text argument is deprecated for HTTP status {} "
"since 4.0 and scheduled for removal in 5.0 (#3462),"
"the response should be provided without a body".format(
self.status_code
),
DeprecationWarning,
stacklevel=2,
)
if headers is not None:
real_headers = CIMultiDict(headers)
else:
real_headers = CIMultiDict()
if content_type is not None:
if not text:
warnings.warn(
"content_type without text is deprecated "
"since 4.0 and scheduled for removal in 5.0 "
"(#3462)",
DeprecationWarning,
stacklevel=2,
)
real_headers[hdrs.CONTENT_TYPE] = content_type
elif hdrs.CONTENT_TYPE not in real_headers and text:
real_headers[hdrs.CONTENT_TYPE] = "text/plain"
self._reason = reason
self._text = text
self._headers = real_headers
self.args = ()
def __bool__(self) -> bool:
return True
@property
def status(self) -> int:
return self.status_code
@property
def reason(self) -> str:
return self._reason
@property
def text(self) -> Optional[str]:
return self._text
@property
def headers(self) -> "CIMultiDict[str]":
return self._headers
def __str__(self) -> str:
return self.reason
def __repr__(self) -> str:
return f"<{self.__class__.__name__}: {self.reason}>"
__reduce__ = object.__reduce__
def __getnewargs__(self) -> Tuple[Any, ...]:
return self.args
class HTTPError(HTTPException):
"""Base class for exceptions with status codes in the 400s and 500s."""
class HTTPRedirection(HTTPException):
"""Base class for exceptions with status codes in the 300s."""
class HTTPSuccessful(HTTPException):
"""Base class for exceptions with status codes in the 200s."""
class HTTPOk(HTTPSuccessful):
status_code = 200
class HTTPCreated(HTTPSuccessful):
status_code = 201
class HTTPAccepted(HTTPSuccessful):
status_code = 202
class HTTPNonAuthoritativeInformation(HTTPSuccessful):
status_code = 203
class HTTPNoContent(HTTPSuccessful):
status_code = 204
empty_body = True
class HTTPResetContent(HTTPSuccessful):
status_code = 205
empty_body = True
class HTTPPartialContent(HTTPSuccessful):
status_code = 206
############################################################
# 3xx redirection
############################################################
class HTTPMove(HTTPRedirection):
def __init__(
self,
location: StrOrURL,
*,
headers: Optional[LooseHeaders] = None,
reason: Optional[str] = None,
text: Optional[str] = None,
content_type: Optional[str] = None,
) -> None:
if not location:
raise ValueError("HTTP redirects need a location to redirect to.")
super().__init__(
headers=headers, reason=reason, text=text, content_type=content_type
)
self._location = URL(location)
self.headers["Location"] = str(self.location)
@property
def location(self) -> URL:
return self._location
class HTTPMultipleChoices(HTTPMove):
status_code = 300
class HTTPMovedPermanently(HTTPMove):
status_code = 301
class HTTPFound(HTTPMove):
status_code = 302
# This one is safe after a POST (the redirected location will be
# retrieved with GET):
class HTTPSeeOther(HTTPMove):
status_code = 303
class HTTPNotModified(HTTPRedirection):
# FIXME: this should include a date or etag header
status_code = 304
empty_body = True
class HTTPUseProxy(HTTPMove):
# Not a move, but looks a little like one
status_code = 305
class HTTPTemporaryRedirect(HTTPMove):
status_code = 307
class HTTPPermanentRedirect(HTTPMove):
status_code = 308
############################################################
# 4xx client error
############################################################
class HTTPClientError(HTTPError):
pass
class HTTPBadRequest(HTTPClientError):
status_code = 400
class HTTPUnauthorized(HTTPClientError):
status_code = 401
class HTTPPaymentRequired(HTTPClientError):
status_code = 402
class HTTPForbidden(HTTPClientError):
status_code = 403
class HTTPNotFound(HTTPClientError):
status_code = 404
class HTTPMethodNotAllowed(HTTPClientError):
status_code = 405
def __init__(
self,
method: str,
allowed_methods: Iterable[str],
*,
headers: Optional[LooseHeaders] = None,
reason: Optional[str] = None,
text: Optional[str] = None,
content_type: Optional[str] = None,
) -> None:
allow = ",".join(sorted(allowed_methods))
super().__init__(
headers=headers, reason=reason, text=text, content_type=content_type
)
self.headers["Allow"] = allow
self._allowed = set(allowed_methods) # type: Set[str]
self._method = method
@property
def allowed_methods(self) -> Set[str]:
return self._allowed
@property
def method(self) -> str:
return self._method
class HTTPNotAcceptable(HTTPClientError):
status_code = 406
class HTTPProxyAuthenticationRequired(HTTPClientError):
status_code = 407
class HTTPRequestTimeout(HTTPClientError):
status_code = 408
class HTTPConflict(HTTPClientError):
status_code = 409
class HTTPGone(HTTPClientError):
status_code = 410
class HTTPLengthRequired(HTTPClientError):
status_code = 411
class HTTPPreconditionFailed(HTTPClientError):
status_code = 412
class HTTPRequestEntityTooLarge(HTTPClientError):
status_code = 413
def __init__(self, max_size: int, actual_size: int, **kwargs: Any) -> None:
kwargs.setdefault(
"text",
"Maximum request body size {} exceeded, "
"actual body size {}".format(max_size, actual_size),
)
super().__init__(**kwargs)
class HTTPRequestURITooLong(HTTPClientError):
status_code = 414
class HTTPUnsupportedMediaType(HTTPClientError):
status_code = 415
class HTTPRequestRangeNotSatisfiable(HTTPClientError):
status_code = 416
class HTTPExpectationFailed(HTTPClientError):
status_code = 417
class HTTPMisdirectedRequest(HTTPClientError):
status_code = 421
class HTTPUnprocessableEntity(HTTPClientError):
status_code = 422
class HTTPFailedDependency(HTTPClientError):
status_code = 424
class HTTPUpgradeRequired(HTTPClientError):
status_code = 426
class HTTPPreconditionRequired(HTTPClientError):
status_code = 428
class HTTPTooManyRequests(HTTPClientError):
status_code = 429
class HTTPRequestHeaderFieldsTooLarge(HTTPClientError):
status_code = 431
class HTTPUnavailableForLegalReasons(HTTPClientError):
status_code = 451
def __init__(
self,
link: StrOrURL,
*,
headers: Optional[LooseHeaders] = None,
reason: Optional[str] = None,
text: Optional[str] = None,
content_type: Optional[str] = None,
) -> None:
super().__init__(
headers=headers, reason=reason, text=text, content_type=content_type
)
self.headers["Link"] = f'<{str(link)}>; rel="blocked-by"'
self._link = URL(link)
@property
def link(self) -> URL:
return self._link
############################################################
# 5xx Server Error
############################################################
# Response status codes beginning with the digit "5" indicate cases in
# which the server is aware that it has erred or is incapable of
# performing the request. Except when responding to a HEAD request, the
# server SHOULD include an entity containing an explanation of the error
# situation, and whether it is a temporary or permanent condition. User
# agents SHOULD display any included entity to the user. These response
# codes are applicable to any request method.
class HTTPServerError(HTTPError):
pass
class HTTPInternalServerError(HTTPServerError):
status_code = 500
class HTTPNotImplemented(HTTPServerError):
status_code = 501
class HTTPBadGateway(HTTPServerError):
status_code = 502
class HTTPServiceUnavailable(HTTPServerError):
status_code = 503
class HTTPGatewayTimeout(HTTPServerError):
status_code = 504
class HTTPVersionNotSupported(HTTPServerError):
status_code = 505
class HTTPVariantAlsoNegotiates(HTTPServerError):
status_code = 506
class HTTPInsufficientStorage(HTTPServerError):
status_code = 507
class HTTPNotExtended(HTTPServerError):
status_code = 510
class HTTPNetworkAuthenticationRequired(HTTPServerError):
status_code = 511
def _initialize_default_reason() -> None:
for obj in globals().values():
if isinstance(obj, type) and issubclass(obj, HTTPException):
if obj.status_code >= 0:
try:
status = HTTPStatus(obj.status_code)
obj.default_reason = status.phrase
except ValueError:
pass
_initialize_default_reason()
del _initialize_default_reason
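# Typical usage inside a request handler (sketch):
#   raise HTTPFound(location="/new-location")
#   raise HTTPMethodNotAllowed(method="PATCH", allowed_methods=("GET", "POST"))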
| {
"content_hash": "e42433e4a4e8fbb36d26f31cc608d5b1",
"timestamp": "",
"source": "github",
"line_count": 510,
"max_line_length": 80,
"avg_line_length": 23.788235294117648,
"alnum_prop": 0.6201780415430267,
"repo_name": "KeepSafe/aiohttp",
"id": "b22995f39acc831149d7770facbfd3e578889b85",
"size": "12132",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aiohttp/web_exceptions.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "838"
},
{
"name": "CSS",
"bytes": "112"
},
{
"name": "HTML",
"bytes": "4890"
},
{
"name": "Makefile",
"bytes": "3179"
},
{
"name": "PLpgSQL",
"bytes": "765"
},
{
"name": "Python",
"bytes": "1236385"
},
{
"name": "Shell",
"bytes": "2309"
}
],
"symlink_target": ""
} |
import os
c = get_config()
load_subconfig('etc/base_config.py')
load_subconfig('etc/github_auth.py')
c.JupyterHub.hub_ip = '0.0.0.0'
c.JupyterHub.proxy_api_ip = '0.0.0.0'
c.JupyterHub.spawner_class = 'everware.CustomSwarmSpawner'
c.Spawner.tls = True
c.DockerSpawner.tls = True
# Change this setting:
# IP of the machine where the Everware can be contacted
c.DockerSpawner.hub_ip_connect = os.environ['DOCKER_PUBLIC_IP']
| {
"content_hash": "e4e15218bea8334482d7a3d35a8f72da",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 63,
"avg_line_length": 26.5625,
"alnum_prop": 0.7388235294117647,
"repo_name": "sashabaranov/everware",
"id": "a2c716a8b097a818fb0c2948b04102f7e761d804",
"size": "661",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "etc/container_swarm_config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "24583"
},
{
"name": "HTML",
"bytes": "31426"
},
{
"name": "JavaScript",
"bytes": "15382"
},
{
"name": "Makefile",
"bytes": "2859"
},
{
"name": "Python",
"bytes": "98039"
},
{
"name": "Shell",
"bytes": "2408"
}
],
"symlink_target": ""
} |
'''
Utilities to dynamically load plugin modules.
Modules imported this way remain accessible to static imports, regardless of
the order in which they are imported. For modules that are not part of an
existing package tree, use create_subpackage() to dynamically create a package
for them before loading them.
'''
import pkgutil
import sys
import types
from heat.openstack.common import log as logging
from heat.openstack.common.gettextutils import _
logger = logging.getLogger(__name__)
def _module_name(*components):
'''Assemble a fully-qualified module name from its components.'''
return '.'.join(components)
def create_subpackage(path, parent_package_name, subpackage_name="plugins"):
'''
Dynamically create a package into which to load plugins.
This allows us to not include an __init__.py in the plugins directory. We
must still create a package for plugins to go in, otherwise we get warning
messages during import. This also provides a convenient place to store the
path(s) to the plugins directory.
'''
package_name = _module_name(parent_package_name, subpackage_name)
package = types.ModuleType(package_name)
package.__path__ = [path] if isinstance(path, basestring) else list(path)
sys.modules[package_name] = package
return package
def _import_module(importer, module_name, package):
'''
Import a module dynamically into the specified package, given its name and
PEP302 Importer object (which knows the path to look in).
'''
# Duplicate copies of modules are bad, so check if this has already been
# imported statically
if module_name in sys.modules:
return sys.modules[module_name]
loader = importer.find_module(module_name)
if loader is None:
return None
module = loader.load_module(module_name)
# Make this accessible through the parent package for static imports
local_name = module_name.partition(package.__name__ + '.')[2]
module_components = local_name.split('.')
parent = reduce(getattr, module_components[:-1], package)
setattr(parent, module_components[-1], module)
return module
def load_modules(package, ignore_error=False):
'''Dynamically load all modules from a given package.'''
path = package.__path__
pkg_prefix = package.__name__ + '.'
for importer, module_name, is_package in pkgutil.walk_packages(path,
pkg_prefix):
try:
module = _import_module(importer, module_name, package)
except ImportError:
logger.error(_('Failed to import module %s') % module_name)
if not ignore_error:
raise
else:
if module is not None:
yield module
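# Example usage (hypothetical package name and path):
#   plugins = create_subpackage('/opt/heat/extra_plugins', 'heat.engine')
#   for module in load_modules(plugins, ignore_error=True):
#       do_something_with(module)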
| {
"content_hash": "1a6565a3179da9dc779a495c3ae9035b",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 79,
"avg_line_length": 33.36904761904762,
"alnum_prop": 0.674634320371031,
"repo_name": "ntt-sic/heat",
"id": "b59a7519e1c70a2c3ba0cb5b8cdabb92cc760d01",
"size": "3423",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/common/plugin_loader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "3336181"
},
{
"name": "Shell",
"bytes": "22168"
}
],
"symlink_target": ""
} |
"""
===============================================
.. module:: evodjango.i18n.models
:platform: Django
:synopsis:
.. moduleauthor:: (C) 2014 Oliver Gutiérrez
# TODO: Automatic translation support using Google Translate
"""
# Django imports
from django.db import models
from django.utils.translation import get_language, ugettext_lazy as _
from django.utils.functional import curry
from django.conf import settings
from django import forms
# EVODjango imports
from evodjango.models import JSONField
from evodjango.forms import TinyMCEWidget
from evodjango.i18n.forms import I18NField
class I18NTextField(JSONField):
"""
Internationalization TextField
"""
description = _('Internationalization TextField')
__metaclass__ = models.SubfieldBase
def formfield(self, **kwargs):
"""
Form field method overload
"""
kwargs.setdefault('required',not self.blank)
kwargs.setdefault('label',self.verbose_name)
return I18NField(**kwargs)
def contribute_to_class(self, cls, name):
"""
Contribute to class adding localized_FIELD methods to the model containing this field
"""
def get_localized_version(modelobj,lang=None):
"""
Function to show localized version of a field
"""
data=getattr(modelobj,name)
if lang is None:
lang=get_language()
if lang in data:
return data[lang]
return ''
setattr(cls, 'localized_%s' % name, get_localized_version)
for lang,langname in settings.LANGUAGES:
setattr(cls, 'localized_%s_%s' % (name,lang), curry(get_localized_version,lang=lang))
# Call original method
super(I18NTextField,self).contribute_to_class(cls, name)
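# Note: for a model declaring e.g. `title = I18NTextField()` (hypothetical field
# name), contribute_to_class above adds a `localized_title()` method plus one
# `localized_title_<lang>()` accessor per language in settings.LANGUAGES.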
class I18NCharField(I18NTextField):
"""
Internationalization CharField
"""
description = _('Internationalization CharField')
__metaclass__ = models.SubfieldBase
def formfield(self,**kwargs):
"""
Form field method overload
"""
kwargs.setdefault('max_length',self.max_length)
kwargs['widget']=forms.TextInput
return super(I18NCharField,self).formfield(**kwargs)
class I18NHTMLField(I18NTextField):
"""
Internationalization HTMLField
"""
description = _('Internationalization HTMLField')
__metaclass__ = models.SubfieldBase
def formfield(self,**kwargs):
"""
Form field method overload
"""
kwargs['widget']=TinyMCEWidget
return super(I18NHTMLField,self).formfield(**kwargs)
| {
"content_hash": "f882dc3342581a81ee0009a5bd09860d",
"timestamp": "",
"source": "github",
"line_count": 89,
"max_line_length": 97,
"avg_line_length": 29.820224719101123,
"alnum_prop": 0.6318764129615675,
"repo_name": "olivergs/evodjango",
"id": "e60fad10b4a38270100b1b6e5770f8610e2ba025",
"size": "2679",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "evodjango/i18n/models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "105275"
}
],
"symlink_target": ""
} |
import rlp
from ethereum.transactions import Transaction
from ethereum.utils import privtoaddr
from eth_tester_client.utils import (
mk_random_privkey,
encode_address,
encode_data,
)
from web3.providers.rpc import TestRPCProvider
def test_eth_sendRawTransaction(web3, wait_for_transaction, extra_accounts):
private_key = mk_random_privkey()
address = encode_address(privtoaddr(private_key))
funding_txn_hash = web3.eth.sendTransaction({
"from": web3.eth.coinbase,
"to": address,
"value": 10000000000000000,
})
wait_for_transaction(web3, funding_txn_hash)
if isinstance(web3.currentProvider, TestRPCProvider):
# ethereum-tester-client doesn't quite implement the
# `sendRawTransaction` correctly because of how the underlying tester
# evm works. It needs to know about the address for this to work.
web3.personal.importRawKey(private_key, "password")
web3.personal.unlockAccount(address, "password")
initial_balance = web3.eth.getBalance(extra_accounts[1])
tx = Transaction(
web3.eth.getTransactionCount(address),
web3.eth.gasPrice,
100000,
extra_accounts[1],
1234,
'',
)
tx.sign(private_key)
raw_tx = rlp.encode(tx)
raw_tx_hex = encode_data(raw_tx)
txn_hash = web3.eth.sendRawTransaction(raw_tx_hex)
wait_for_transaction(web3, txn_hash)
txn_receipt = web3.eth.getTransactionReceipt(txn_hash)
after_balance = web3.eth.getBalance(extra_accounts[1])
assert after_balance - initial_balance == 1234
| {
"content_hash": "3e13e7460a0e854ed4de723a04c6f946",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 77,
"avg_line_length": 29.62962962962963,
"alnum_prop": 0.68625,
"repo_name": "shravan-shandilya/web3.py",
"id": "0d52fd52b7266a24b595fd2e54d72ad9798c7045",
"size": "1600",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/eth-module/test_eth_sendRawTransaction.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "926"
},
{
"name": "Python",
"bytes": "306962"
}
],
"symlink_target": ""
} |
"""
Abstract base class for dhcp providers.
"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class BaseDHCP(object):
"""Base class for DHCP provider APIs."""
@abc.abstractmethod
def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
"""Update one or more DHCP options on the specified port.
:param port_id: designate which port these attributes
will be applied to.
:param dhcp_options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param token: An optional authentication token.
:raises: FailedToUpdateDHCPOptOnPort
"""
@abc.abstractmethod
def update_dhcp_opts(self, task, options, vifs=None):
"""Send or update the DHCP BOOT options for this node.
:param task: A TaskManager instance.
:param options: this will be a list of dicts, e.g.
::
[{'opt_name': 'bootfile-name',
'opt_value': 'pxelinux.0'},
{'opt_name': 'server-ip-address',
'opt_value': '123.123.123.456'},
{'opt_name': 'tftp-server',
'opt_value': '123.123.123.123'}]
:param vifs: A dict with keys 'ports' and 'portgroups' and
dicts as values. Each dict has key/value pairs of the form
<ironic UUID>:<neutron port UUID>. e.g.
::
{'ports': {'port.uuid': vif.id},
'portgroups': {'portgroup.uuid': vif.id}}
If the value is None, will get the list of ports/portgroups
from the Ironic port/portgroup objects.
:raises: FailedToUpdateDHCPOptOnPort
"""
@abc.abstractmethod
def get_ip_addresses(self, task):
"""Get IP addresses for all ports/portgroups in `task`.
:param task: A TaskManager instance.
:returns: List of IP addresses associated with
task's ports and portgroups.
"""
def clean_dhcp_opts(self, task):
"""Clean up the DHCP BOOT options for all ports in `task`.
:param task: A TaskManager instance.
:raises: FailedToCleanDHCPOpts
"""
pass
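# A minimal concrete provider sketch (illustrative only, not a shipped driver):
#
#   class NoopDHCP(BaseDHCP):
#       def update_port_dhcp_opts(self, port_id, dhcp_options, token=None):
#           pass
#
#       def update_dhcp_opts(self, task, options, vifs=None):
#           pass
#
#       def get_ip_addresses(self, task):
#           return []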
| {
"content_hash": "f8d36a27bf16c01677612357b8dc6943",
"timestamp": "",
"source": "github",
"line_count": 81,
"max_line_length": 71,
"avg_line_length": 33.18518518518518,
"alnum_prop": 0.5107886904761905,
"repo_name": "SauloAislan/ironic",
"id": "94f61fd091a4d326226daf74a8d6cbd70c2d64e5",
"size": "3318",
"binary": false,
"copies": "4",
"ref": "refs/heads/SauloAislan-WIP",
"path": "ironic/dhcp/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "349"
},
{
"name": "Python",
"bytes": "5596702"
},
{
"name": "Shell",
"bytes": "119832"
}
],
"symlink_target": ""
} |
"""
=================================================
Demo of affinity propagation clustering algorithm
=================================================
Reference:
Brendan J. Frey and Delbert Dueck, "Clustering by Passing Messages
Between Data Points", Science Feb. 2007
"""
print(__doc__)
from sklearn.cluster import AffinityPropagation
from sklearn import metrics
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, labels_true = make_blobs(n_samples=300, centers=centers, cluster_std=0.5,
random_state=0)
##############################################################################
# Compute Affinity Propagation
af = AffinityPropagation(preference=-50).fit(X)
cluster_centers_indices = af.cluster_centers_indices_
labels = af.labels_
n_clusters_ = len(cluster_centers_indices)
print('Estimated number of clusters: %d' % n_clusters_)
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels_true, labels))
print("Completeness: %0.3f" % metrics.completeness_score(labels_true, labels))
print("V-measure: %0.3f" % metrics.v_measure_score(labels_true, labels))
print("Adjusted Rand Index: %0.3f"
% metrics.adjusted_rand_score(labels_true, labels))
print("Adjusted Mutual Information: %0.3f"
% metrics.adjusted_mutual_info_score(labels_true, labels))
print("Silhouette Coefficient: %0.3f"
% metrics.silhouette_score(X, labels, metric='sqeuclidean'))
##############################################################################
# Plot result
import pylab as pl
from itertools import cycle
pl.close('all')
pl.figure(1)
pl.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
class_members = labels == k
cluster_center = X[cluster_centers_indices[k]]
pl.plot(X[class_members, 0], X[class_members, 1], col + '.')
pl.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
for x in X[class_members]:
pl.plot([cluster_center[0], x[0]], [cluster_center[1], x[1]], col)
pl.title('Estimated number of clusters: %d' % n_clusters_)
pl.show()
| {
"content_hash": "900b7ce80d82d6d2b3d94bc06f92a1d4",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 78,
"avg_line_length": 36.806451612903224,
"alnum_prop": 0.6007887817703769,
"repo_name": "fspaolo/scikit-learn",
"id": "7abd6822ffc5dd48c010e3f6c7567201197a8a5d",
"size": "2282",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "examples/cluster/plot_affinity_propagation.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
} |
"""empty message
Revision ID: 2956c7423d73
Revises: None
Create Date: 2014-03-14 00:56:06.192705
"""
# revision identifiers, used by Alembic.
revision = '2956c7423d73'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
pass
def downgrade():
pass
| {
"content_hash": "6f9049330a8fc611e81eb86c5e397ead",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 40,
"avg_line_length": 13.363636363636363,
"alnum_prop": 0.7210884353741497,
"repo_name": "sniboboof/flask-microblog",
"id": "8797d12ec598529af45340d51a1849c2a32f7bdd",
"size": "294",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "flask-microblog/migrations/versions/2956c7423d73_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PHP",
"bytes": "38"
},
{
"name": "Python",
"bytes": "23441"
}
],
"symlink_target": ""
} |
from msrest.paging import Paged
class JobCollectionDefinitionPaged(Paged):
"""
A paging container for iterating over a list of JobCollectionDefinition object
"""
_attribute_map = {
'next_link': {'key': 'nextLink', 'type': 'str'},
'current_page': {'key': 'value', 'type': '[JobCollectionDefinition]'}
}
def __init__(self, *args, **kwargs):
super(JobCollectionDefinitionPaged, self).__init__(*args, **kwargs)
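# Usage sketch (hedged): msrest Paged containers are iterables that fetch
# further pages lazily, so a caller receiving this type can simply iterate over
# it. The client call below is an assumed example; any operation returning
# JobCollectionDefinitionPaged behaves the same way.
#
#     for job_collection in client.job_collections.list_by_subscription():
#         print(job_collection)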
| {
"content_hash": "2b3fe50fdff6b0700cb5a950c9efc3b4",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 28.75,
"alnum_prop": 0.6282608695652174,
"repo_name": "lmazuel/azure-sdk-for-python",
"id": "c14768483eb350e9c5cf59dab2a4ec90b17fd585",
"size": "934",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "azure-mgmt-scheduler/azure/mgmt/scheduler/models/job_collection_definition_paged.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "42572767"
}
],
"symlink_target": ""
} |
class numChain(object):
def __init__(self, numberOfLinks, desiredValue):
self.numberOfLinks = numberOfLinks
self.desiredValue = desiredValue
self.links=[]
self.links.append(1)
self.stringGroup=[]
self.allPotentialValues=set()
for x in range(numberOfLinks+1):
self.stringGroup.append([x,])
print self.calculateNextLinks()
def calculateNextLinks(self):
potentialNextValues = set()
currentNumberOfLinks=len(self.links)
if (self.links[-1]==self.desiredValue):
return self.links
elif (currentNumberOfLinks>self.numberOfLinks):
return False
else:
for outterLinkLocation in range(currentNumberOfLinks):
for innerLinkLocation in range(outterLinkLocation, currentNumberOfLinks):
self.allPotentialValues.add(self.links[outterLinkLocation]+self.links[innerLinkLocation])
potentialNextValues.add(self.links[outterLinkLocation]+self.links[innerLinkLocation])
for eachLink in potentialNextValues:
self.links.append(eachLink)
done=self.calculateNextLinks()
if (done):
return done
else:
self.links.pop()
return False
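# Usage sketch (hedged): numChain appears to search, by recursive backtracking,
# for an addition chain that reaches desiredValue within the given number of
# links, where each new link is the sum of two earlier links starting from 1.
# The constructor prints the chain it finds, or False if none fits the limit
# (this is Python 2 code, matching the print statement above).
#
#     numChain(7, 31)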
| {
"content_hash": "49a9e768bf7ea71358a18ccfded3db7a",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 109,
"avg_line_length": 38.77142857142857,
"alnum_prop": 0.604274134119381,
"repo_name": "jeremiahmarks/dangerzone",
"id": "cf8b2cc1fda3a839a4f14abe27aa28f9ef6a3c6b",
"size": "1833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/python/nchain2.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5082"
},
{
"name": "HTML",
"bytes": "2728663"
},
{
"name": "Java",
"bytes": "18658"
},
{
"name": "JavaScript",
"bytes": "4591"
},
{
"name": "PHP",
"bytes": "61100"
},
{
"name": "Python",
"bytes": "419882"
},
{
"name": "Ruby",
"bytes": "126786"
},
{
"name": "Shell",
"bytes": "130622"
}
],
"symlink_target": ""
} |
import datetime
import io
from os import linesep
import re
import sys
from pip._vendor.toml.tz import TomlTz
if sys.version_info < (3,):
_range = xrange # noqa: F821
else:
unicode = str
_range = range
basestring = str
unichr = chr
def _detect_pathlib_path(p):
if (3, 4) <= sys.version_info:
import pathlib
if isinstance(p, pathlib.PurePath):
return True
return False
def _ispath(p):
if isinstance(p, (bytes, basestring)):
return True
return _detect_pathlib_path(p)
def _getpath(p):
if (3, 6) <= sys.version_info:
import os
return os.fspath(p)
if _detect_pathlib_path(p):
return str(p)
return p
try:
FNFError = FileNotFoundError
except NameError:
FNFError = IOError
TIME_RE = re.compile(r"([0-9]{2}):([0-9]{2}):([0-9]{2})(\.([0-9]{3,6}))?")
class TomlDecodeError(ValueError):
"""Base toml Exception / Error."""
def __init__(self, msg, doc, pos):
lineno = doc.count('\n', 0, pos) + 1
colno = pos - doc.rfind('\n', 0, pos)
emsg = '{} (line {} column {} char {})'.format(msg, lineno, colno, pos)
ValueError.__init__(self, emsg)
self.msg = msg
self.doc = doc
self.pos = pos
self.lineno = lineno
self.colno = colno
# Matches a TOML number, which allows underscores for readability
_number_with_underscores = re.compile('([0-9])(_([0-9]))*')
class CommentValue(object):
def __init__(self, val, comment, beginline, _dict):
self.val = val
separator = "\n" if beginline else " "
self.comment = separator + comment
self._dict = _dict
def __getitem__(self, key):
return self.val[key]
def __setitem__(self, key, value):
self.val[key] = value
def dump(self, dump_value_func):
retstr = dump_value_func(self.val)
if isinstance(self.val, self._dict):
return self.comment + "\n" + unicode(retstr)
else:
return unicode(retstr) + self.comment
def _strictly_valid_num(n):
n = n.strip()
if not n:
return False
if n[0] == '_':
return False
if n[-1] == '_':
return False
if "_." in n or "._" in n:
return False
if len(n) == 1:
return True
if n[0] == '0' and n[1] not in ['.', 'o', 'b', 'x']:
return False
if n[0] == '+' or n[0] == '-':
n = n[1:]
if len(n) > 1 and n[0] == '0' and n[1] != '.':
return False
if '__' in n:
return False
return True
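# A few illustrative inputs (hedged, inferred from the checks above):
# _strictly_valid_num enforces TOML's rules on underscores and leading zeroes
# before a candidate token reaches load_value.
#
#     _strictly_valid_num("1_000")   # -> True
#     _strictly_valid_num("_1000")   # -> False  (leading underscore)
#     _strictly_valid_num("01")      # -> False  (leading zero)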
def load(f, _dict=dict, decoder=None):
"""Parses named file or files as toml and returns a dictionary
Args:
f: Path to the file to open, array of files to read into single dict
or a file descriptor
_dict: (optional) Specifies the class of the returned toml dictionary
decoder: The decoder to use
Returns:
Parsed toml file represented as a dictionary
Raises:
TypeError -- When f is invalid type
TomlDecodeError: Error while decoding toml
IOError / FileNotFoundError -- When an array with no valid (existing)
(Python 2 / Python 3) file paths is passed
"""
if _ispath(f):
with io.open(_getpath(f), encoding='utf-8') as ffile:
return loads(ffile.read(), _dict, decoder)
elif isinstance(f, list):
from os import path as op
from warnings import warn
if not [path for path in f if op.exists(path)]:
error_msg = "Load expects a list to contain filenames only."
error_msg += linesep
error_msg += ("The list needs to contain the path of at least one "
"existing file.")
raise FNFError(error_msg)
if decoder is None:
decoder = TomlDecoder(_dict)
d = decoder.get_empty_table()
for l in f: # noqa: E741
if op.exists(l):
d.update(load(l, _dict, decoder))
else:
warn("Non-existent filename in list with at least one valid "
"filename")
return d
else:
try:
return loads(f.read(), _dict, decoder)
except AttributeError:
raise TypeError("You can only load a file descriptor, filename or "
"list")
_groupname_re = re.compile(r'^[A-Za-z0-9_-]+$')
def loads(s, _dict=dict, decoder=None):
"""Parses string as toml
Args:
s: String to be parsed
_dict: (optional) Specifies the class of the returned toml dictionary
Returns:
Parsed toml file represented as a dictionary
Raises:
TypeError: When a non-string is passed
TomlDecodeError: Error while decoding toml
"""
implicitgroups = []
if decoder is None:
decoder = TomlDecoder(_dict)
retval = decoder.get_empty_table()
currentlevel = retval
if not isinstance(s, basestring):
raise TypeError("Expecting something like a string")
if not isinstance(s, unicode):
s = s.decode('utf8')
original = s
sl = list(s)
openarr = 0
openstring = False
openstrchar = ""
multilinestr = False
arrayoftables = False
beginline = True
keygroup = False
dottedkey = False
keyname = 0
key = ''
prev_key = ''
line_no = 1
for i, item in enumerate(sl):
if item == '\r' and sl[i + 1] == '\n':
sl[i] = ' '
continue
if keyname:
key += item
if item == '\n':
raise TomlDecodeError("Key name found without value."
" Reached end of line.", original, i)
if openstring:
if item == openstrchar:
oddbackslash = False
k = 1
while i >= k and sl[i - k] == '\\':
oddbackslash = not oddbackslash
k += 1
if not oddbackslash:
keyname = 2
openstring = False
openstrchar = ""
continue
elif keyname == 1:
if item.isspace():
keyname = 2
continue
elif item == '.':
dottedkey = True
continue
elif item.isalnum() or item == '_' or item == '-':
continue
elif (dottedkey and sl[i - 1] == '.' and
(item == '"' or item == "'")):
openstring = True
openstrchar = item
continue
elif keyname == 2:
if item.isspace():
if dottedkey:
nextitem = sl[i + 1]
if not nextitem.isspace() and nextitem != '.':
keyname = 1
continue
if item == '.':
dottedkey = True
nextitem = sl[i + 1]
if not nextitem.isspace() and nextitem != '.':
keyname = 1
continue
if item == '=':
keyname = 0
prev_key = key[:-1].rstrip()
key = ''
dottedkey = False
else:
raise TomlDecodeError("Found invalid character in key name: '" +
item + "'. Try quoting the key name.",
original, i)
if item == "'" and openstrchar != '"':
k = 1
try:
while sl[i - k] == "'":
k += 1
if k == 3:
break
except IndexError:
pass
if k == 3:
multilinestr = not multilinestr
openstring = multilinestr
else:
openstring = not openstring
if openstring:
openstrchar = "'"
else:
openstrchar = ""
if item == '"' and openstrchar != "'":
oddbackslash = False
k = 1
tripquote = False
try:
while sl[i - k] == '"':
k += 1
if k == 3:
tripquote = True
break
if k == 1 or (k == 3 and tripquote):
while sl[i - k] == '\\':
oddbackslash = not oddbackslash
k += 1
except IndexError:
pass
if not oddbackslash:
if tripquote:
multilinestr = not multilinestr
openstring = multilinestr
else:
openstring = not openstring
if openstring:
openstrchar = '"'
else:
openstrchar = ""
if item == '#' and (not openstring and not keygroup and
not arrayoftables):
j = i
comment = ""
try:
while sl[j] != '\n':
comment += s[j]
sl[j] = ' '
j += 1
except IndexError:
break
if not openarr:
decoder.preserve_comment(line_no, prev_key, comment, beginline)
if item == '[' and (not openstring and not keygroup and
not arrayoftables):
if beginline:
if len(sl) > i + 1 and sl[i + 1] == '[':
arrayoftables = True
else:
keygroup = True
else:
openarr += 1
if item == ']' and not openstring:
if keygroup:
keygroup = False
elif arrayoftables:
if sl[i - 1] == ']':
arrayoftables = False
else:
openarr -= 1
if item == '\n':
if openstring or multilinestr:
if not multilinestr:
raise TomlDecodeError("Unbalanced quotes", original, i)
if ((sl[i - 1] == "'" or sl[i - 1] == '"') and (
sl[i - 2] == sl[i - 1])):
sl[i] = sl[i - 1]
if sl[i - 3] == sl[i - 1]:
sl[i - 3] = ' '
elif openarr:
sl[i] = ' '
else:
beginline = True
line_no += 1
elif beginline and sl[i] != ' ' and sl[i] != '\t':
beginline = False
if not keygroup and not arrayoftables:
if sl[i] == '=':
raise TomlDecodeError("Found empty keyname. ", original, i)
keyname = 1
key += item
if keyname:
raise TomlDecodeError("Key name found without value."
" Reached end of file.", original, len(s))
if openstring: # reached EOF and have an unterminated string
raise TomlDecodeError("Unterminated string found."
" Reached end of file.", original, len(s))
s = ''.join(sl)
s = s.split('\n')
multikey = None
multilinestr = ""
multibackslash = False
pos = 0
for idx, line in enumerate(s):
if idx > 0:
pos += len(s[idx - 1]) + 1
decoder.embed_comments(idx, currentlevel)
if not multilinestr or multibackslash or '\n' not in multilinestr:
line = line.strip()
if line == "" and (not multikey or multibackslash):
continue
if multikey:
if multibackslash:
multilinestr += line
else:
multilinestr += line
multibackslash = False
closed = False
if multilinestr[0] == '[':
closed = line[-1] == ']'
elif len(line) > 2:
closed = (line[-1] == multilinestr[0] and
line[-2] == multilinestr[0] and
line[-3] == multilinestr[0])
if closed:
try:
value, vtype = decoder.load_value(multilinestr)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
currentlevel[multikey] = value
multikey = None
multilinestr = ""
else:
k = len(multilinestr) - 1
while k > -1 and multilinestr[k] == '\\':
multibackslash = not multibackslash
k -= 1
if multibackslash:
multilinestr = multilinestr[:-1]
else:
multilinestr += "\n"
continue
if line[0] == '[':
arrayoftables = False
if len(line) == 1:
raise TomlDecodeError("Opening key group bracket on line by "
"itself.", original, pos)
if line[1] == '[':
arrayoftables = True
line = line[2:]
splitstr = ']]'
else:
line = line[1:]
splitstr = ']'
i = 1
quotesplits = decoder._get_split_on_quotes(line)
quoted = False
for quotesplit in quotesplits:
if not quoted and splitstr in quotesplit:
break
i += quotesplit.count(splitstr)
quoted = not quoted
line = line.split(splitstr, i)
if len(line) < i + 1 or line[-1].strip() != "":
raise TomlDecodeError("Key group not on a line by itself.",
original, pos)
groups = splitstr.join(line[:-1]).split('.')
i = 0
while i < len(groups):
groups[i] = groups[i].strip()
if len(groups[i]) > 0 and (groups[i][0] == '"' or
groups[i][0] == "'"):
groupstr = groups[i]
j = i + 1
while ((not groupstr[0] == groupstr[-1]) or
len(groupstr) == 1):
j += 1
if j > len(groups) + 2:
raise TomlDecodeError("Invalid group name '" +
groupstr + "' Something " +
"went wrong.", original, pos)
groupstr = '.'.join(groups[i:j]).strip()
groups[i] = groupstr[1:-1]
groups[i + 1:j] = []
else:
if not _groupname_re.match(groups[i]):
raise TomlDecodeError("Invalid group name '" +
groups[i] + "'. Try quoting it.",
original, pos)
i += 1
currentlevel = retval
for i in _range(len(groups)):
group = groups[i]
if group == "":
raise TomlDecodeError("Can't have a keygroup with an empty "
"name", original, pos)
try:
currentlevel[group]
if i == len(groups) - 1:
if group in implicitgroups:
implicitgroups.remove(group)
if arrayoftables:
raise TomlDecodeError("An implicitly defined "
"table can't be an array",
original, pos)
elif arrayoftables:
currentlevel[group].append(decoder.get_empty_table()
)
else:
raise TomlDecodeError("What? " + group +
" already exists?" +
str(currentlevel),
original, pos)
except TypeError:
currentlevel = currentlevel[-1]
if group not in currentlevel:
currentlevel[group] = decoder.get_empty_table()
if i == len(groups) - 1 and arrayoftables:
currentlevel[group] = [decoder.get_empty_table()]
except KeyError:
if i != len(groups) - 1:
implicitgroups.append(group)
currentlevel[group] = decoder.get_empty_table()
if i == len(groups) - 1 and arrayoftables:
currentlevel[group] = [decoder.get_empty_table()]
currentlevel = currentlevel[group]
if arrayoftables:
try:
currentlevel = currentlevel[-1]
except KeyError:
pass
elif line[0] == "{":
if line[-1] != "}":
                raise TomlDecodeError("Line breaks are not allowed in inline "
"objects", original, pos)
try:
decoder.load_inline_object(line, currentlevel, multikey,
multibackslash)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
elif "=" in line:
try:
ret = decoder.load_line(line, currentlevel, multikey,
multibackslash)
except ValueError as err:
raise TomlDecodeError(str(err), original, pos)
if ret is not None:
multikey, multilinestr, multibackslash = ret
return retval
def _load_date(val):
microsecond = 0
tz = None
try:
if len(val) > 19:
if val[19] == '.':
if val[-1].upper() == 'Z':
subsecondval = val[20:-1]
tzval = "Z"
else:
subsecondvalandtz = val[20:]
if '+' in subsecondvalandtz:
splitpoint = subsecondvalandtz.index('+')
subsecondval = subsecondvalandtz[:splitpoint]
tzval = subsecondvalandtz[splitpoint:]
elif '-' in subsecondvalandtz:
splitpoint = subsecondvalandtz.index('-')
subsecondval = subsecondvalandtz[:splitpoint]
tzval = subsecondvalandtz[splitpoint:]
else:
tzval = None
subsecondval = subsecondvalandtz
if tzval is not None:
tz = TomlTz(tzval)
microsecond = int(int(subsecondval) *
(10 ** (6 - len(subsecondval))))
else:
tz = TomlTz(val[19:])
except ValueError:
tz = None
if "-" not in val[1:]:
return None
try:
if len(val) == 10:
d = datetime.date(
int(val[:4]), int(val[5:7]),
int(val[8:10]))
else:
d = datetime.datetime(
int(val[:4]), int(val[5:7]),
int(val[8:10]), int(val[11:13]),
int(val[14:16]), int(val[17:19]), microsecond, tz)
except ValueError:
return None
return d
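# Behaviour sketch (hedged, values chosen for illustration): _load_date returns
# a datetime.date for a bare date, a timezone-aware datetime.datetime for a
# full RFC 3339 timestamp, and None for anything that is not a TOML date.
#
#     _load_date("1979-05-27")            # -> datetime.date(1979, 5, 27)
#     _load_date("1979-05-27T07:32:00Z")  # -> datetime.datetime(..., tzinfo=TomlTz("Z"))
#     _load_date("not-a-date")            # -> None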
def _load_unicode_escapes(v, hexbytes, prefix):
skip = False
i = len(v) - 1
while i > -1 and v[i] == '\\':
skip = not skip
i -= 1
for hx in hexbytes:
if skip:
skip = False
i = len(hx) - 1
while i > -1 and hx[i] == '\\':
skip = not skip
i -= 1
v += prefix
v += hx
continue
hxb = ""
i = 0
hxblen = 4
if prefix == "\\U":
hxblen = 8
hxb = ''.join(hx[i:i + hxblen]).lower()
if hxb.strip('0123456789abcdef'):
raise ValueError("Invalid escape sequence: " + hxb)
if hxb[0] == "d" and hxb[1].strip('01234567'):
raise ValueError("Invalid escape sequence: " + hxb +
". Only scalar unicode points are allowed.")
v += unichr(int(hxb, 16))
v += unicode(hx[len(hxb):])
return v
# Unescape TOML string values.
# content after the \
_escapes = ['0', 'b', 'f', 'n', 'r', 't', '"']
# What it should be replaced by
_escapedchars = ['\0', '\b', '\f', '\n', '\r', '\t', '\"']
# Used for substitution
_escape_to_escapedchars = dict(zip(_escapes, _escapedchars))
def _unescape(v):
"""Unescape characters in a TOML string."""
i = 0
backslash = False
while i < len(v):
if backslash:
backslash = False
if v[i] in _escapes:
v = v[:i - 1] + _escape_to_escapedchars[v[i]] + v[i + 1:]
elif v[i] == '\\':
v = v[:i - 1] + v[i:]
elif v[i] == 'u' or v[i] == 'U':
i += 1
else:
raise ValueError("Reserved escape sequence used")
continue
elif v[i] == '\\':
backslash = True
i += 1
return v
class InlineTableDict(object):
"""Sentinel subclass of dict for inline tables."""
class TomlDecoder(object):
def __init__(self, _dict=dict):
self._dict = _dict
def get_empty_table(self):
return self._dict()
def get_empty_inline_table(self):
class DynamicInlineTableDict(self._dict, InlineTableDict):
"""Concrete sentinel subclass for inline tables.
            It is a subclass of _dict which is passed in dynamically at load
            time. It is also a subclass of InlineTableDict.
"""
return DynamicInlineTableDict()
def load_inline_object(self, line, currentlevel, multikey=False,
multibackslash=False):
candidate_groups = line[1:-1].split(",")
groups = []
if len(candidate_groups) == 1 and not candidate_groups[0].strip():
candidate_groups.pop()
while len(candidate_groups) > 0:
candidate_group = candidate_groups.pop(0)
try:
_, value = candidate_group.split('=', 1)
except ValueError:
raise ValueError("Invalid inline table encountered")
value = value.strip()
if ((value[0] == value[-1] and value[0] in ('"', "'")) or (
value[0] in '-0123456789' or
value in ('true', 'false') or
(value[0] == "[" and value[-1] == "]") or
(value[0] == '{' and value[-1] == '}'))):
groups.append(candidate_group)
elif len(candidate_groups) > 0:
candidate_groups[0] = (candidate_group + "," +
candidate_groups[0])
else:
raise ValueError("Invalid inline table value encountered")
for group in groups:
status = self.load_line(group, currentlevel, multikey,
multibackslash)
if status is not None:
break
def _get_split_on_quotes(self, line):
doublequotesplits = line.split('"')
quoted = False
quotesplits = []
if len(doublequotesplits) > 1 and "'" in doublequotesplits[0]:
singlequotesplits = doublequotesplits[0].split("'")
doublequotesplits = doublequotesplits[1:]
while len(singlequotesplits) % 2 == 0 and len(doublequotesplits):
singlequotesplits[-1] += '"' + doublequotesplits[0]
doublequotesplits = doublequotesplits[1:]
if "'" in singlequotesplits[-1]:
singlequotesplits = (singlequotesplits[:-1] +
singlequotesplits[-1].split("'"))
quotesplits += singlequotesplits
for doublequotesplit in doublequotesplits:
if quoted:
quotesplits.append(doublequotesplit)
else:
quotesplits += doublequotesplit.split("'")
quoted = not quoted
return quotesplits
def load_line(self, line, currentlevel, multikey, multibackslash):
i = 1
quotesplits = self._get_split_on_quotes(line)
quoted = False
for quotesplit in quotesplits:
if not quoted and '=' in quotesplit:
break
i += quotesplit.count('=')
quoted = not quoted
pair = line.split('=', i)
strictly_valid = _strictly_valid_num(pair[-1])
if _number_with_underscores.match(pair[-1]):
pair[-1] = pair[-1].replace('_', '')
while len(pair[-1]) and (pair[-1][0] != ' ' and pair[-1][0] != '\t' and
pair[-1][0] != "'" and pair[-1][0] != '"' and
pair[-1][0] != '[' and pair[-1][0] != '{' and
pair[-1].strip() != 'true' and
pair[-1].strip() != 'false'):
try:
float(pair[-1])
break
except ValueError:
pass
if _load_date(pair[-1]) is not None:
break
if TIME_RE.match(pair[-1]):
break
i += 1
prev_val = pair[-1]
pair = line.split('=', i)
if prev_val == pair[-1]:
raise ValueError("Invalid date or number")
if strictly_valid:
strictly_valid = _strictly_valid_num(pair[-1])
pair = ['='.join(pair[:-1]).strip(), pair[-1].strip()]
if '.' in pair[0]:
if '"' in pair[0] or "'" in pair[0]:
quotesplits = self._get_split_on_quotes(pair[0])
quoted = False
levels = []
for quotesplit in quotesplits:
if quoted:
levels.append(quotesplit)
else:
levels += [level.strip() for level in
quotesplit.split('.')]
quoted = not quoted
else:
levels = pair[0].split('.')
while levels[-1] == "":
levels = levels[:-1]
for level in levels[:-1]:
if level == "":
continue
if level not in currentlevel:
currentlevel[level] = self.get_empty_table()
currentlevel = currentlevel[level]
pair[0] = levels[-1].strip()
elif (pair[0][0] == '"' or pair[0][0] == "'") and \
(pair[0][-1] == pair[0][0]):
pair[0] = _unescape(pair[0][1:-1])
k, koffset = self._load_line_multiline_str(pair[1])
if k > -1:
while k > -1 and pair[1][k + koffset] == '\\':
multibackslash = not multibackslash
k -= 1
if multibackslash:
multilinestr = pair[1][:-1]
else:
multilinestr = pair[1] + "\n"
multikey = pair[0]
else:
value, vtype = self.load_value(pair[1], strictly_valid)
try:
currentlevel[pair[0]]
raise ValueError("Duplicate keys!")
except TypeError:
raise ValueError("Duplicate keys!")
except KeyError:
if multikey:
return multikey, multilinestr, multibackslash
else:
currentlevel[pair[0]] = value
def _load_line_multiline_str(self, p):
poffset = 0
if len(p) < 3:
return -1, poffset
if p[0] == '[' and (p.strip()[-1] != ']' and
self._load_array_isstrarray(p)):
newp = p[1:].strip().split(',')
while len(newp) > 1 and newp[-1][0] != '"' and newp[-1][0] != "'":
newp = newp[:-2] + [newp[-2] + ',' + newp[-1]]
newp = newp[-1]
poffset = len(p) - len(newp)
p = newp
if p[0] != '"' and p[0] != "'":
return -1, poffset
if p[1] != p[0] or p[2] != p[0]:
return -1, poffset
if len(p) > 5 and p[-1] == p[0] and p[-2] == p[0] and p[-3] == p[0]:
return -1, poffset
return len(p) - 1, poffset
def load_value(self, v, strictly_valid=True):
if not v:
raise ValueError("Empty value is invalid")
if v == 'true':
return (True, "bool")
elif v.lower() == 'true':
raise ValueError("Only all lowercase booleans allowed")
elif v == 'false':
return (False, "bool")
elif v.lower() == 'false':
raise ValueError("Only all lowercase booleans allowed")
elif v[0] == '"' or v[0] == "'":
quotechar = v[0]
testv = v[1:].split(quotechar)
triplequote = False
triplequotecount = 0
if len(testv) > 1 and testv[0] == '' and testv[1] == '':
testv = testv[2:]
triplequote = True
closed = False
for tv in testv:
if tv == '':
if triplequote:
triplequotecount += 1
else:
closed = True
else:
oddbackslash = False
try:
i = -1
j = tv[i]
while j == '\\':
oddbackslash = not oddbackslash
i -= 1
j = tv[i]
except IndexError:
pass
if not oddbackslash:
if closed:
raise ValueError("Found tokens after a closed " +
"string. Invalid TOML.")
else:
if not triplequote or triplequotecount > 1:
closed = True
else:
triplequotecount = 0
if quotechar == '"':
escapeseqs = v.split('\\')[1:]
backslash = False
for i in escapeseqs:
if i == '':
backslash = not backslash
else:
if i[0] not in _escapes and (i[0] != 'u' and
i[0] != 'U' and
not backslash):
raise ValueError("Reserved escape sequence used")
if backslash:
backslash = False
for prefix in ["\\u", "\\U"]:
if prefix in v:
hexbytes = v.split(prefix)
v = _load_unicode_escapes(hexbytes[0], hexbytes[1:],
prefix)
v = _unescape(v)
if len(v) > 1 and v[1] == quotechar and (len(v) < 3 or
v[1] == v[2]):
v = v[2:-2]
return (v[1:-1], "str")
elif v[0] == '[':
return (self.load_array(v), "array")
elif v[0] == '{':
inline_object = self.get_empty_inline_table()
self.load_inline_object(v, inline_object)
return (inline_object, "inline_object")
elif TIME_RE.match(v):
h, m, s, _, ms = TIME_RE.match(v).groups()
time = datetime.time(int(h), int(m), int(s), int(ms) if ms else 0)
return (time, "time")
else:
parsed_date = _load_date(v)
if parsed_date is not None:
return (parsed_date, "date")
if not strictly_valid:
raise ValueError("Weirdness with leading zeroes or "
"underscores in your number.")
itype = "int"
neg = False
if v[0] == '-':
neg = True
v = v[1:]
elif v[0] == '+':
v = v[1:]
v = v.replace('_', '')
lowerv = v.lower()
if '.' in v or ('x' not in v and ('e' in v or 'E' in v)):
if '.' in v and v.split('.', 1)[1] == '':
raise ValueError("This float is missing digits after "
"the point")
if v[0] not in '0123456789':
raise ValueError("This float doesn't have a leading "
"digit")
v = float(v)
itype = "float"
elif len(lowerv) == 3 and (lowerv == 'inf' or lowerv == 'nan'):
v = float(v)
itype = "float"
if itype == "int":
v = int(v, 0)
if neg:
return (0 - v, itype)
return (v, itype)
def bounded_string(self, s):
if len(s) == 0:
return True
if s[-1] != s[0]:
return False
i = -2
backslash = False
while len(s) + i > 0:
if s[i] == "\\":
backslash = not backslash
i -= 1
else:
break
return not backslash
def _load_array_isstrarray(self, a):
a = a[1:-1].strip()
if a != '' and (a[0] == '"' or a[0] == "'"):
return True
return False
def load_array(self, a):
atype = None
retval = []
a = a.strip()
if '[' not in a[1:-1] or "" != a[1:-1].split('[')[0].strip():
strarray = self._load_array_isstrarray(a)
if not a[1:-1].strip().startswith('{'):
a = a[1:-1].split(',')
else:
# a is an inline object, we must find the matching parenthesis
# to define groups
new_a = []
start_group_index = 1
end_group_index = 2
open_bracket_count = 1 if a[start_group_index] == '{' else 0
in_str = False
while end_group_index < len(a[1:]):
if a[end_group_index] == '"' or a[end_group_index] == "'":
if in_str:
backslash_index = end_group_index - 1
while (backslash_index > -1 and
a[backslash_index] == '\\'):
in_str = not in_str
backslash_index -= 1
in_str = not in_str
if not in_str and a[end_group_index] == '{':
open_bracket_count += 1
if in_str or a[end_group_index] != '}':
end_group_index += 1
continue
elif a[end_group_index] == '}' and open_bracket_count > 1:
open_bracket_count -= 1
end_group_index += 1
continue
# Increase end_group_index by 1 to get the closing bracket
end_group_index += 1
new_a.append(a[start_group_index:end_group_index])
# The next start index is at least after the closing
# bracket, a closing bracket can be followed by a comma
# since we are in an array.
start_group_index = end_group_index + 1
while (start_group_index < len(a[1:]) and
a[start_group_index] != '{'):
start_group_index += 1
end_group_index = start_group_index + 1
a = new_a
b = 0
if strarray:
while b < len(a) - 1:
ab = a[b].strip()
while (not self.bounded_string(ab) or
(len(ab) > 2 and
ab[0] == ab[1] == ab[2] and
ab[-2] != ab[0] and
ab[-3] != ab[0])):
a[b] = a[b] + ',' + a[b + 1]
ab = a[b].strip()
if b < len(a) - 2:
a = a[:b + 1] + a[b + 2:]
else:
a = a[:b + 1]
b += 1
else:
al = list(a[1:-1])
a = []
openarr = 0
j = 0
for i in _range(len(al)):
if al[i] == '[':
openarr += 1
elif al[i] == ']':
openarr -= 1
elif al[i] == ',' and not openarr:
a.append(''.join(al[j:i]))
j = i + 1
a.append(''.join(al[j:]))
for i in _range(len(a)):
a[i] = a[i].strip()
if a[i] != '':
nval, ntype = self.load_value(a[i])
if atype:
if ntype != atype:
raise ValueError("Not a homogeneous array")
else:
atype = ntype
retval.append(nval)
return retval
def preserve_comment(self, line_no, key, comment, beginline):
pass
def embed_comments(self, idx, currentlevel):
pass
class TomlPreserveCommentDecoder(TomlDecoder):
def __init__(self, _dict=dict):
self.saved_comments = {}
super(TomlPreserveCommentDecoder, self).__init__(_dict)
def preserve_comment(self, line_no, key, comment, beginline):
self.saved_comments[line_no] = (key, comment, beginline)
def embed_comments(self, idx, currentlevel):
if idx not in self.saved_comments:
return
key, comment, beginline = self.saved_comments[idx]
currentlevel[key] = CommentValue(currentlevel[key], comment, beginline,
self._dict)
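# Usage sketch (hedged; the TOML text is made up): TomlPreserveCommentDecoder
# records full-line comments by source line number and, when re-embedding them,
# wraps the associated values in CommentValue.
#
#     decoder = TomlPreserveCommentDecoder()
#     doc = loads('a = 1\n# answer\nb = 2\n', decoder=decoder)
#     # decoder.saved_comments maps line numbers to (key, comment, beginline)
#     # tuples, and doc['a'] should now be a CommentValue wrapping 1.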
| {
"content_hash": "52b686be260400446ced03ba8d8f3df0",
"timestamp": "",
"source": "github",
"line_count": 1057,
"max_line_length": 80,
"avg_line_length": 36.85335856196783,
"alnum_prop": 0.4279406479437285,
"repo_name": "jsirois/pex",
"id": "e071100de0f3ef75b2149857861c2f60fc4e89af",
"size": "38954",
"binary": false,
"copies": "16",
"ref": "refs/heads/main",
"path": "pex/vendor/_vendored/pip/pip/_vendor/toml/decoder.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1379"
},
{
"name": "Python",
"bytes": "2182256"
},
{
"name": "Shell",
"bytes": "1472"
}
],
"symlink_target": ""
} |
"""Tests for tf.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import operator
import numpy as np
from tensorflow.python.eager import function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_state_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import gradient_descent
from tensorflow.python.util import compat
class VariablesTestCase(test.TestCase):
@test_util.run_deprecated_v1
def testDistributeStrategy(self):
v = variables.VariableV1(0.0)
self.assertIsNone(v._distribute_strategy)
@test_util.run_v1_only("b/120545219")
def testInitialization(self):
with self.cached_session():
var0 = variables.VariableV1(0.0)
self.assertEqual("Variable:0", var0.name)
self.assertEqual("Variable", var0._shared_name)
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.get_shape())
self.assertEqual([], var0.shape)
var1 = variables.VariableV1(1.1)
self.assertEqual("Variable_1:0", var1.name)
self.assertEqual("Variable_1", var1._shared_name)
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.get_shape())
self.assertEqual([], var1.shape)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var0)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(var1)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var0))
self.assertAllClose(1.1, self.evaluate(var1))
@test_util.run_v1_only("b/120545219")
def testInitializationOrder(self):
with self.cached_session():
rnd = variables.Variable(random_ops.random_uniform([3, 6]), name="rnd")
self.assertEqual("rnd:0", rnd.name)
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.get_shape())
self.assertEqual([3, 6], rnd.shape)
dep = variables.Variable(rnd.initialized_value(), name="dep")
self.assertEqual("dep:0", dep.name)
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.get_shape())
self.assertEqual([3, 6], dep.shape)
# Currently have to set the shape manually for Add.
added_val = rnd.initialized_value() + dep.initialized_value() + 2.0
added_val.set_shape(rnd.get_shape())
depdep = variables.Variable(added_val, name="depdep")
self.assertEqual("depdep:0", depdep.name)
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.get_shape())
self.assertEqual([3, 6], depdep.shape)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(self.evaluate(rnd), self.evaluate(dep))
self.assertAllClose(
self.evaluate(rnd) + self.evaluate(dep) + 2.0, self.evaluate(depdep))
def testIterable(self):
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in variables.Variable(0.0):
pass
with self.assertRaisesRegexp(TypeError, "not iterable"):
for _ in variables.Variable([0.0, 1.0]):
pass
@test_util.run_deprecated_v1
def testAssignments(self):
with self.cached_session():
var = variables.Variable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.assertAllClose(1.0, self.evaluate(plus_one))
self.assertAllClose(1.0, self.evaluate(var))
self.assertAllClose(-1.0, self.evaluate(minus_one))
self.assertAllClose(-1.0, self.evaluate(var))
self.assertAllClose(4.0, self.evaluate(four))
self.assertAllClose(4.0, self.evaluate(var))
@test_util.run_deprecated_v1
def testResourceAssignments(self):
with self.session(use_gpu=True):
var = resource_variable_ops.ResourceVariable(0.0)
plus_one = var.assign_add(1.0)
minus_one = var.assign_sub(2.0)
four = var.assign(4.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(0.0, self.evaluate(var))
self.evaluate(plus_one)
self.assertAllClose(1.0, self.evaluate(var))
self.evaluate(minus_one)
self.assertAllClose(-1.0, self.evaluate(var))
self.evaluate(four)
self.assertAllClose(4.0, self.evaluate(var))
def testZeroSizeStringAssign(self):
with self.cached_session() as sess:
array = variables.VariableV1(
initial_value=array_ops.zeros((0,), dtype=dtypes.string),
name="foo",
trainable=False,
collections=[ops.GraphKeys.LOCAL_VARIABLES])
self.evaluate(variables.local_variables_initializer())
old_value = array.value()
copy_op = array.assign(old_value)
self.assertEqual([], list(self.evaluate(copy_op)))
def _countUpToTest(self, dtype):
with self.cached_session():
zero = constant_op.constant(0, dtype=dtype)
var = variables.Variable(zero)
count_up_to = var.count_up_to(3)
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(var))
self.assertEqual(0, self.evaluate(count_up_to))
self.assertEqual(1, self.evaluate(var))
self.assertEqual(1, self.evaluate(count_up_to))
self.assertEqual(2, self.evaluate(var))
self.assertEqual(2, self.evaluate(count_up_to))
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
with self.assertRaisesOpError("Reached limit of 3"):
self.evaluate(count_up_to)
self.assertEqual(3, self.evaluate(var))
@test_util.run_deprecated_v1
def testCountUpToInt32(self):
self._countUpToTest(dtypes.int32)
@test_util.run_deprecated_v1
def testCountUpToInt64(self):
self._countUpToTest(dtypes.int64)
@test_util.run_v1_only("b/120545219")
def testControlDepsNone(self):
with self.cached_session():
c = constant_op.constant(1.0)
with ops.control_dependencies([c]):
        # d gets the control dep.
d = constant_op.constant(2.0)
# variables do not.
var_x = variables.VariableV1(2.0)
self.assertEqual([c.op], d.op.control_inputs)
self.assertEqual([], var_x.initializer.control_inputs)
self.assertEqual([], var_x.value().op.control_inputs)
self.assertEqual([], var_x._ref().op.control_inputs) # pylint: disable=protected-access
@test_util.run_v1_only("b/120545219")
def testControlFlow(self):
with self.cached_session() as sess:
v0 = variables.Variable(0, name="v0")
var_dict = {}
# Call get_variable in each of the cond clauses.
def var_in_then_clause():
v1 = variables.Variable(1, name="v1")
var_dict["v1"] = v1
return v1 + v0
def var_in_else_clause():
v2 = variables.Variable(2, name="v2")
var_dict["v2"] = v2
return v2 + v0
add = control_flow_ops.cond(
math_ops.less(v0, 10), var_in_then_clause, var_in_else_clause)
v1 = var_dict["v1"]
v2 = var_dict["v2"]
# We should be able to initialize and run v1 and v2 without initializing
# v0, even if the variable was created with a control dep on v0.
self.evaluate(v1.initializer)
self.assertEqual([1], self.evaluate(v1))
self.evaluate(v2.initializer)
self.assertEqual([2], self.evaluate(v2))
# v0 should still be uninitialized.
with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
self.evaluate(v0)
# We should not be able to run 'add' yet.
with self.assertRaisesRegexp(errors_impl.OpError, "uninitialized"):
self.evaluate(add)
# If we initialize v0 we should be able to run 'add'.
self.evaluate(v0.initializer)
self.evaluate(add)
@test_util.run_v1_only("b/120545219")
def testControlFlowInitialization(self):
"""Expects an error if an initializer is in a control-flow scope."""
def cond(i, _):
return i < 10
def body(i, _):
zero = array_ops.zeros([], dtype=dtypes.int32)
v = variables.Variable(initial_value=zero)
return (i + 1, v.read_value())
with self.assertRaisesRegexp(ValueError, "inside a control-flow"):
control_flow_ops.while_loop(cond, body, [0, 0])
@test_util.run_deprecated_v1
def testUseVariableAsTensor(self):
with self.cached_session():
var_x = variables.Variable(2.0)
var_y = variables.Variable(3.0)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(2.0, self.evaluate(var_x))
self.assertAllClose(3.0, self.evaluate(var_y))
self.assertAllClose(5.0, self.evaluate(math_ops.add(var_x, var_y)))
@test_util.run_deprecated_v1
def testZeroSizeVarSameAsConst(self):
with self.cached_session():
zero_size_var = variables.Variable(array_ops.zeros([0, 2]))
zero_size_const = array_ops.ones([2, 0])
variable_mul = math_ops.matmul(zero_size_const, zero_size_var)
const_mul = math_ops.matmul(
zero_size_const, zero_size_const, transpose_b=True)
self.evaluate(variables.global_variables_initializer())
variable_output = self.evaluate(variable_mul)
self.assertAllClose(self.evaluate(const_mul), variable_output)
self.assertAllClose([[0., 0.], [0., 0.]], variable_output)
@test_util.run_deprecated_v1
def testCachingDevice(self):
with self.cached_session():
var = variables.Variable(2.0)
self.assertEqual(var.device, var.value().device)
self.assertEqual(var.device, var.initialized_value().device)
var_cached = variables.Variable(2.0, caching_device="/job:foo")
self.assertFalse(var_cached.device.startswith("/job:foo"))
self.assertTrue(var_cached.value().device.startswith("/job:foo"))
@test_util.run_deprecated_v1
def testCollections(self):
with self.cached_session():
var_x = variables.VariableV1(2.0)
var_y = variables.VariableV1(2.0, trainable=False)
var_z = variables.VariableV1(2.0, trainable=True)
var_t = variables.VariableV1(
2.0,
trainable=True,
collections=[
ops.GraphKeys.TRAINABLE_VARIABLES, ops.GraphKeys.GLOBAL_VARIABLES
])
self.assertEqual([var_x, var_y, var_z, var_t],
variables.global_variables())
self.assertEqual([var_x, var_z, var_t], variables.trainable_variables())
@test_util.run_deprecated_v1
def testCollectionsWithScope(self):
with self.cached_session():
with ops.name_scope("scope_1"):
var_x = variables.VariableV1(2.0)
with ops.name_scope("scope_2"):
var_y = variables.VariableV1(2.0)
self.assertEqual([var_x, var_y], variables.global_variables())
self.assertEqual([var_x], variables.global_variables("scope_1"))
self.assertEqual([var_y], variables.global_variables("scope_2"))
self.assertEqual([var_x, var_y], variables.trainable_variables())
self.assertEqual([var_x], variables.trainable_variables("scope_1"))
self.assertEqual([var_y], variables.trainable_variables("scope_2"))
def testOperatorWrapping(self):
for attr in functools.WRAPPER_ASSIGNMENTS:
self.assertEqual(
getattr(variables.Variable.__add__, attr),
getattr(ops.Tensor.__add__, attr))
@test_util.run_deprecated_v1
def testOperators(self):
with self.cached_session():
var_f = variables.Variable([2.0])
add = var_f + 0.0
radd = 1.0 + var_f
sub = var_f - 1.0
rsub = 1.0 - var_f
mul = var_f * 10.0
rmul = 10.0 * var_f
div = var_f / 10.0
rdiv = 10.0 / var_f
lt = var_f < 3.0
rlt = 3.0 < var_f
le = var_f <= 2.0
rle = 2.0 <= var_f
gt = var_f > 3.0
rgt = 3.0 > var_f
ge = var_f >= 2.0
rge = 2.0 >= var_f
neg = -var_f
abs_v = abs(var_f)
var_i = variables.Variable([20])
mod = var_i % 7
rmod = 103 % var_i
var_b = variables.Variable([True, False])
and_v = operator.and_(var_b, [True, True])
or_v = operator.or_(var_b, [False, True])
xor_v = operator.xor(var_b, [False, False])
invert_v = ~var_b
rnd = np.random.rand(4, 4).astype("f")
var_t = variables.Variable(rnd)
slice_v = var_t[2, 0:0]
var_m = variables.Variable([[2.0, 3.0]])
matmul = var_m.__matmul__([[10.0], [20.0]])
rmatmul = var_m.__rmatmul__([[10.0], [20.0]])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([2.0], self.evaluate(add))
self.assertAllClose([3.0], self.evaluate(radd))
self.assertAllClose([1.0], self.evaluate(sub))
self.assertAllClose([-1.0], self.evaluate(rsub))
self.assertAllClose([20.0], self.evaluate(mul))
self.assertAllClose([20.0], self.evaluate(rmul))
self.assertAllClose([0.2], self.evaluate(div))
self.assertAllClose([5.0], self.evaluate(rdiv))
self.assertAllClose([-2.0], self.evaluate(neg))
self.assertAllClose([2.0], self.evaluate(abs_v))
self.assertAllClose([True], self.evaluate(lt))
self.assertAllClose([False], self.evaluate(rlt))
self.assertAllClose([True], self.evaluate(le))
self.assertAllClose([True], self.evaluate(rle))
self.assertAllClose([False], self.evaluate(gt))
self.assertAllClose([True], self.evaluate(rgt))
self.assertAllClose([True], self.evaluate(ge))
self.assertAllClose([True], self.evaluate(rge))
self.assertAllClose([6], self.evaluate(mod))
self.assertAllClose([3], self.evaluate(rmod))
self.assertAllClose([True, False], self.evaluate(and_v))
self.assertAllClose([True, True], self.evaluate(or_v))
self.assertAllClose([True, False], self.evaluate(xor_v))
self.assertAllClose([False, True], self.evaluate(invert_v))
self.assertAllClose(rnd[2, 0:0], self.evaluate(slice_v))
self.assertAllClose([[80.0]], self.evaluate(matmul))
self.assertAllClose([[20.0, 30.0], [40.0, 60.0]], self.evaluate(rmatmul))
@test_util.run_deprecated_v1
def testSession(self):
with self.cached_session() as sess:
var = variables.Variable([1, 12])
self.evaluate(variables.global_variables_initializer())
self.assertAllClose([1, 12], self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testColocation(self):
with ops.device("/job:ps"):
var = variables.VariableV1(0, name="v")
with ops.device("/job:worker/task:7"):
assign_op = var.assign(1)
self.assertDeviceEqual("/job:ps", assign_op.device)
self.assertEqual([b"loc:@v"], assign_op.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testInitializerFunction(self):
value = [[-42], [133.7]]
shape = [2, 1]
with self.cached_session():
initializer = lambda: constant_op.constant(value)
v1 = variables.Variable(initializer, dtype=dtypes.float32)
self.assertEqual(shape, v1.get_shape())
self.assertEqual(shape, v1.shape)
self.assertAllClose(value, self.evaluate(v1.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v1)
v2 = variables.Variable(
math_ops.negative(v1.initialized_value()), dtype=dtypes.float32)
self.assertEqual(v1.get_shape(), v2.get_shape())
self.assertEqual(v1.shape, v2.shape)
self.assertAllClose(np.negative(value), self.evaluate(v2.initial_value))
with self.assertRaises(errors_impl.FailedPreconditionError):
self.evaluate(v2)
self.evaluate(variables.global_variables_initializer())
self.assertAllClose(np.negative(value), self.evaluate(v2))
def testConstraintArg(self):
constraint = lambda x: x
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
self.assertEqual(v.constraint, constraint)
constraint = 0
with self.assertRaises(ValueError):
v = variables.Variable(
lambda: constant_op.constant(1.),
constraint=constraint)
@test_util.run_v1_only("b/120545219")
def testNoRefDataRace(self):
with self.cached_session():
a = variables.Variable([1, 2, 3], dtype=dtypes.float32)
b = variables.Variable(a.initialized_value() + 2)
c = variables.Variable(b.initialized_value() + 2)
self.evaluate(variables.global_variables_initializer())
self.assertAllEqual(self.evaluate(a), [1, 2, 3])
self.assertAllEqual(self.evaluate(b), [3, 4, 5])
self.assertAllEqual(self.evaluate(c), [5, 6, 7])
@test_util.run_deprecated_v1
def testInitializerFunctionDevicePlacement(self):
with self.cached_session():
initializer = lambda: constant_op.constant(42.0)
with ops.device("/cpu:100"):
v1 = variables.Variable(initializer, dtype=dtypes.float32, name="v1")
expected_device = "/device:CPU:100"
expected_group_v1 = [b"loc:@v1"]
self.assertEqual(expected_device, v1.op.device)
self.assertEqual(expected_group_v1, v1.op.colocation_groups())
for i in v1.initializer.inputs:
self.assertEqual(expected_group_v1, i.op.colocation_groups())
v2 = variables.Variable(initializer, dtype=dtypes.float32, name="v2")
expected_group_v2 = [b"loc:@v2"]
self.assertEqual(expected_group_v2, v2.op.colocation_groups())
for i in v2.initializer.inputs:
self.assertEqual(expected_group_v2, i.op.colocation_groups())
@test_util.run_v1_only("b/120545219")
def testVariableDefInitializedInstances(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v_def = variables.Variable(
initial_value=constant_op.constant(3.0)).to_proto()
with ops.Graph().as_default(), self.cached_session() as sess:
# v describes a VariableDef-based variable without an initial value.
v = variables.Variable(variable_def=v_def)
self.assertEqual(3.0, self.evaluate(v.initialized_value()))
# initialized_value should not rerun the initializer_op if the variable
# has already been initialized elsewhere.
self.evaluate(v.assign(1.0))
self.assertEqual(1.0, self.evaluate(v.initialized_value()))
v_def.ClearField("initial_value_name")
with ops.Graph().as_default(), self.cached_session() as sess:
# Restoring a legacy VariableDef proto that does not have
# initial_value_name set should still work.
v = variables.Variable(variable_def=v_def)
# We should also be able to re-export the variable to a new meta graph.
self.assertProtoEquals(v_def, v.to_proto())
# But attempts to use initialized_value will result in errors.
with self.assertRaises(ValueError):
self.evaluate(v.initialized_value())
def testTrainableInProto(self):
with ops.Graph().as_default():
non_trainable_variable = variables.Variable(
trainable=False,
initial_value=constant_op.constant(10.0))
self.assertEqual(
False,
variables.Variable(variable_def=non_trainable_variable.to_proto())
.trainable)
trainable_variable = variables.Variable(
trainable=True,
initial_value=constant_op.constant(10.0))
self.assertEqual(
True,
variables.Variable(variable_def=trainable_variable.to_proto())
.trainable)
@test_util.run_deprecated_v1
def testLoad(self):
with self.cached_session():
var = variables.Variable(np.zeros((5, 5), np.float32))
self.evaluate(variables.global_variables_initializer())
var.load(np.ones((5, 5), np.float32))
self.assertAllClose(np.ones((5, 5), np.float32), self.evaluate(var))
@test_util.run_v1_only("b/120545219")
def testRepr(self):
var = variables.VariableV1(np.zeros((5, 5), np.float32), name="noop")
self.assertEqual(
"<tf.Variable 'noop:0' shape=(5, 5) dtype=float32_ref>",
repr(var))
def testVariableNamesPreserveNameScopesWithDefun(self):
@function.defun
def create_variable():
with ops.name_scope("foo"):
v = variables.Variable(0.0, name="bar")
self.assertEqual(v.name, "foo/bar:0")
with ops.get_default_graph().as_default():
create_variable()
class IsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default(), self.cached_session() as sess:
uninited = variables.report_uninitialized_variables()
self.assertEqual(0, self.evaluate(uninited).size)
def testAssertVariablesInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable([1, 2], name="v")
w = variables.Variable([3, 4], name="w")
_ = v, w
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(variables.global_variables_initializer())
self.assertEqual(0, self.evaluate(uninited).size)
@test_util.run_v1_only("b/120545219")
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2], name="v")
w = variables.VariableV1([3, 4], name="w")
uninited = variables.report_uninitialized_variables()
self.assertAllEqual(np.array([b"v", b"w"]), self.evaluate(uninited))
self.evaluate(w.initializer)
self.assertAllEqual(np.array([b"v"]), self.evaluate(uninited))
v.initializer.run()
self.assertEqual(0, self.evaluate(uninited).size)
def testZeroSizeVarInitialized(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.Variable(array_ops.zeros([0, 2]), name="v")
uninited = variables.report_uninitialized_variables()
v.initializer.run() # not strictly necessary
self.assertEqual(0, self.evaluate(uninited).size)
def testTrainingWithZeroSizeVar(self):
with ops.Graph().as_default(), self.cached_session() as sess:
a = variables.Variable(array_ops.zeros([0, 2]))
b = variables.Variable(array_ops.ones([2, 2]))
objective = math_ops.reduce_sum(b + math_ops.matmul(
a, a, transpose_a=True))
self.evaluate(variables.global_variables_initializer())
do_opt = gradient_descent.GradientDescentOptimizer(0.1).minimize(
objective)
self.evaluate([do_opt])
self.assertAllClose([[0.9, 0.9], [0.9, 0.9]], self.evaluate(b))
@test_util.run_v1_only("b/120545219")
class ObsoleteIsInitializedTest(test.TestCase):
def testNoVars(self):
with ops.Graph().as_default():
self.assertEqual(None, variables.assert_variables_initialized())
def testVariables(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
_ = v, w
inited = variables.assert_variables_initialized()
with self.assertRaisesOpError("Attempting to use uninitialized value"):
self.evaluate(inited)
self.evaluate(variables.global_variables_initializer())
self.evaluate(inited)
def testVariableList(self):
with ops.Graph().as_default(), self.cached_session() as sess:
v = variables.VariableV1([1, 2])
w = variables.VariableV1([3, 4])
inited = variables.assert_variables_initialized([v])
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
self.evaluate(w.initializer)
with self.assertRaisesOpError("Attempting to use uninitialized value"):
inited.op.run()
v.initializer.run()
inited.op.run()
class PartitionedVariableTest(test.TestCase):
def testPartitionedVariable(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
# Pass variable_list as [v1, v0] to ensure they are properly
# re-sorted to [v0, v1] based on their slice info offsets.
partitioned_variable = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
concatenated = ops.convert_to_tensor(partitioned_variable)
num_partitions = len(partitioned_variable)
iterated_partitions = list(partitioned_variable)
self.assertEqual(2, num_partitions)
self.assertEqual([v0, v1], iterated_partitions)
self.assertEqual([2], partitioned_variable.get_shape())
self.assertEqual([2], partitioned_variable.shape)
self.assertEqual([2], concatenated.get_shape())
self.assertEqual([2], concatenated.shape)
def testPartitionedVariableFailures(self):
with ops.Graph().as_default():
with self.assertRaisesRegexp(ValueError, "empty"):
variables.PartitionedVariable(
name="fail",
shape=2,
dtype=dtypes.int32,
variable_list=[],
partitions=[])
with self.assertRaisesRegexp(ValueError, "must have a save_slice_info"):
v0 = variables.Variable([0])
partitions = [1]
variables.PartitionedVariable(
name="two_vars",
shape=[1],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
with self.assertRaisesRegexp(ValueError, "full shapes must match"):
v0 = variables.Variable([0])
v1 = variables.Variable([1])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [1], [1]))
partitions = [2]
variables.PartitionedVariable(
name="two_vars",
shape=[3],
dtype=v0.dtype,
variable_list=[v1, v0],
partitions=partitions)
with self.assertRaisesRegexp(ValueError, "must be positive"):
v0 = variables.Variable([0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
partitions = [0]
variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0],
partitions=partitions)
def testPartitionedVariableAssignments(self):
with ops.Graph().as_default(), self.cached_session():
v0 = variables.Variable(initial_value=[0.0])
v1 = variables.Variable(initial_value=[1.0])
v2 = variables.Variable(initial_value=[20.0])
v3 = variables.Variable(initial_value=[30.0])
v0._set_save_slice_info(
variables.Variable.SaveSliceInfo(v0.name, [2], [0], [1]))
v1._set_save_slice_info(
variables.Variable.SaveSliceInfo(v1.name, [2], [1], [1]))
v2._set_save_slice_info(
variables.Variable.SaveSliceInfo(v2.name, [2], [0], [1]))
v3._set_save_slice_info(
variables.Variable.SaveSliceInfo(v3.name, [2], [1], [1]))
partitions = [2]
      # PartitionedVariable re-sorts the variable_list entries based on their
      # slice info offsets, regardless of the order they are passed in.
pv_0 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v0, v1],
partitions=partitions)
pv_1 = variables.PartitionedVariable(
name="two_vars",
shape=[2],
dtype=v0.dtype,
variable_list=[v2, v3],
partitions=partitions)
deltas_a = constant_op.constant([1.0, 2.0])
deltas_b = constant_op.constant([3.0, 4.0])
ones = array_ops.ones([2])
plus_delta = pv_0.assign_add(deltas_a)
minus_delta = pv_0.assign_sub(deltas_b)
assign_ones = pv_0.assign(ones)
c_0 = constant_op.constant([2.0])
c_1 = constant_op.constant([3.0])
assign_list = pv_1.assign([c_0, c_1])
assign_part_value = pv_1.assign_add(assign_ones)
assign_part_var = pv_1.assign_sub(pv_0)
self.evaluate(variables.global_variables_initializer())
self.assertEqual([1.0], self.evaluate(plus_delta[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([3.0], self.evaluate(plus_delta[1]))
self.assertEqual([3.0], self.evaluate(v1))
self.assertEqual([-2.0], self.evaluate(minus_delta[0]))
self.assertEqual([-2.0], self.evaluate(v0))
self.assertEqual([-1.0], self.evaluate(minus_delta[1]))
self.assertEqual([-1.0], self.evaluate(v1))
self.assertEqual([1.0], self.evaluate(assign_ones[0]))
self.assertEqual([1.0], self.evaluate(v0))
self.assertEqual([1.0], self.evaluate(assign_ones[1]))
self.assertEqual([1.0], self.evaluate(v1))
self.assertEqual([2.0], self.evaluate(assign_list[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_list[1]))
self.assertEqual([3.0], self.evaluate(v3))
self.assertEqual([3.0], self.evaluate(assign_part_value[0]))
self.assertEqual([3.0], self.evaluate(v2))
self.assertEqual([4.0], self.evaluate(assign_part_value[1]))
self.assertEqual([4.0], self.evaluate(v3))
self.assertEqual([2.0], self.evaluate(assign_part_var[0]))
self.assertEqual([2.0], self.evaluate(v2))
self.assertEqual([3.0], self.evaluate(assign_part_var[1]))
self.assertEqual([3.0], self.evaluate(v3))
class VariableContainerTest(test.TestCase):
def testContainer(self):
with ops.Graph().as_default():
v0 = variables.Variable([0])
with ops.container("l1"):
v1 = variables.Variable([1])
with ops.container("l2"):
v2 = variables.Variable([2])
special_v = gen_state_ops.variable(
shape=[1],
dtype=dtypes.float32,
name="VariableInL3",
container="l3",
shared_name="")
v3 = variables.Variable([3])
v4 = variables.Variable([4])
self.assertEqual(compat.as_bytes(""), v0.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v1.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l2"), v2.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l3"), special_v.op.get_attr("container"))
self.assertEqual(compat.as_bytes("l1"), v3.op.get_attr("container"))
self.assertEqual(compat.as_bytes(""), v4.op.get_attr("container"))
class AggregationModesTest(test.TestCase):
def testV1V2Equal(self):
v1 = variables.VariableAggregation
v2 = variables.VariableAggregationV2
self.assertEqual(v1.NONE, v2.NONE)
self.assertEqual(v1.SUM, v2.SUM)
self.assertEqual(v1.MEAN, v2.MEAN)
self.assertEqual(v1.ONLY_FIRST_REPLICA, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v1.ONLY_FIRST_TOWER, v2.ONLY_FIRST_REPLICA)
self.assertEqual(v2.NONE, v1.NONE)
self.assertEqual(v2.SUM, v1.SUM)
self.assertEqual(v2.MEAN, v1.MEAN)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_REPLICA)
self.assertEqual(v2.ONLY_FIRST_REPLICA, v1.ONLY_FIRST_TOWER)
self.assertEqual(hash(v1.NONE), hash(v2.NONE))
self.assertEqual(hash(v1.SUM), hash(v2.SUM))
self.assertEqual(hash(v1.MEAN), hash(v2.MEAN))
self.assertEqual(hash(v1.ONLY_FIRST_REPLICA), hash(v2.ONLY_FIRST_REPLICA))
self.assertEqual(hash(v1.ONLY_FIRST_TOWER), hash(v2.ONLY_FIRST_REPLICA))
if __name__ == "__main__":
test.main()
| {
"content_hash": "d4a54350ffb3b672c1919750e0043645",
"timestamp": "",
"source": "github",
"line_count": 842,
"max_line_length": 94,
"avg_line_length": 38.402612826603324,
"alnum_prop": 0.6518323797742385,
"repo_name": "theflofly/tensorflow",
"id": "b3316b73ff6c6fccedced987cfa8428092df4014",
"size": "33024",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/variables_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "3560"
},
{
"name": "Batchfile",
"bytes": "14734"
},
{
"name": "C",
"bytes": "644154"
},
{
"name": "C#",
"bytes": "8446"
},
{
"name": "C++",
"bytes": "59546729"
},
{
"name": "CMake",
"bytes": "207169"
},
{
"name": "Dockerfile",
"bytes": "75509"
},
{
"name": "Go",
"bytes": "1507157"
},
{
"name": "HTML",
"bytes": "4680118"
},
{
"name": "Java",
"bytes": "908330"
},
{
"name": "Jupyter Notebook",
"bytes": "2510253"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "Makefile",
"bytes": "94633"
},
{
"name": "Objective-C",
"bytes": "60069"
},
{
"name": "Objective-C++",
"bytes": "118322"
},
{
"name": "PHP",
"bytes": "15108"
},
{
"name": "Pascal",
"bytes": "770"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "PureBasic",
"bytes": "25356"
},
{
"name": "Python",
"bytes": "46310564"
},
{
"name": "RobotFramework",
"bytes": "891"
},
{
"name": "Ruby",
"bytes": "838"
},
{
"name": "Shell",
"bytes": "481712"
},
{
"name": "Smarty",
"bytes": "27249"
},
{
"name": "Swift",
"bytes": "53109"
}
],
"symlink_target": ""
} |
import unittest
# test_records = frappe.get_test_records('Ebay Manager')
class TestEbayManager(unittest.TestCase):
pass
| {
"content_hash": "1806df79ed63cf43d08a904186c7fdcf",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 56,
"avg_line_length": 18.142857142857142,
"alnum_prop": 0.7559055118110236,
"repo_name": "bglazier/erpnext_ebay",
"id": "9e3070c37211c27e21d0c4aeb5849cf4d5466b6b",
"size": "223",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "erpnext_ebay/erpnext_ebay/doctype/ebay_manager/test_ebay_manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "12208"
},
{
"name": "Procfile",
"bytes": "392"
},
{
"name": "Python",
"bytes": "370106"
}
],
"symlink_target": ""
} |
# coding: utf-8
from __future__ import print_function
from __future__ import absolute_import
# install_requires of ruamel.base is not really required but the old
# ruamel.base installed __init__.py, and thus a new version should
# be installed at some point
_package_data = dict(
full_package_name="ruamel.yaml",
version_info=(0, 11, 11),
author="Anthon van der Neut",
author_email="[email protected]",
description="ruamel.yaml is a YAML parser/emitter that supports roundtrip preservation of comments, seq/map flow style, and map key order", # NOQA
entry_points=None,
install_requires=dict(
any=[],
py26=["ruamel.ordereddict"],
py27=["ruamel.ordereddict"]
),
ext_modules=[dict(
name="_ruamel_yaml",
src=["ext/_ruamel_yaml.c", "ext/api.c", "ext/writer.c", "ext/dumper.c",
"ext/loader.c", "ext/reader.c", "ext/scanner.c", "ext/parser.c",
"ext/emitter.c"],
lib=[],
# test='#include "ext/yaml.h"\n\nint main(int argc, char* argv[])\n{\nyaml_parser_t parser;\nparser = parser; /* prevent warning */\nreturn 0;\n}\n' # NOQA
)
],
classifiers=[
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
"Programming Language :: Python :: Implementation :: Jython",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Text Processing :: Markup"
],
windows_wheels=True,
read_the_docs='yaml',
)
# < from ruamel.util.new import _convert_version
def _convert_version(tup):
"""create a PEP 386 pseudo-format conformant string from tuple tup"""
ret_val = str(tup[0]) # first is always digit
next_sep = "." # separator for next extension, can be "" or "."
for x in tup[1:]:
if isinstance(x, int):
ret_val += next_sep + str(x)
next_sep = '.'
continue
first_letter = x[0].lower()
next_sep = ''
if first_letter in 'abcr':
ret_val += 'rc' if first_letter == 'r' else first_letter
elif first_letter in 'pd':
ret_val += '.post' if first_letter == 'p' else '.dev'
return ret_val
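# Worked examples, derived from the logic above (shown as comments for clarity):
#   _convert_version((0, 11, 11))            -> '0.11.11'
#   _convert_version((0, 12, 0, 'b', 2))     -> '0.12.0b2'      (beta pre-release)
#   _convert_version((0, 12, 0, 'post', 1))  -> '0.12.0.post1'  (post-release)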
# <
version_info = _package_data['version_info']
__version__ = _convert_version(version_info)
del _convert_version
try:
from .cyaml import * # NOQA
__with_libyaml__ = True
except (ImportError, ValueError): # for Jython
__with_libyaml__ = False
# body extracted to main.py
try:
from .main import * # NOQA
except ImportError:
from ruamel.yaml.main import * # NOQA
| {
"content_hash": "024912c023e0cecf1b24f9b3ecd71b3e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 165,
"avg_line_length": 35.67857142857143,
"alnum_prop": 0.584250917584251,
"repo_name": "KaranToor/MA450",
"id": "9be208205ea694383b43ab60c013defe2f5e4b6e",
"size": "2997",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "google-cloud-sdk/lib/third_party/ruamel/yaml/__init__.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3162"
},
{
"name": "CSS",
"bytes": "1930"
},
{
"name": "HTML",
"bytes": "13381"
},
{
"name": "Java",
"bytes": "151442"
},
{
"name": "JavaScript",
"bytes": "4906"
},
{
"name": "Makefile",
"bytes": "1636"
},
{
"name": "Objective-C",
"bytes": "13335"
},
{
"name": "PHP",
"bytes": "9086"
},
{
"name": "Pascal",
"bytes": "62"
},
{
"name": "Python",
"bytes": "19710731"
},
{
"name": "Roff",
"bytes": "2069494"
},
{
"name": "Ruby",
"bytes": "690"
},
{
"name": "Shell",
"bytes": "32272"
},
{
"name": "Smarty",
"bytes": "4968"
},
{
"name": "SourcePawn",
"bytes": "616"
},
{
"name": "Swift",
"bytes": "14225"
}
],
"symlink_target": ""
} |
from lr.plugins.base import BasePlugin
class DoNotPublishError(Exception):
pass
class ITombstonePolicy(BasePlugin):
def __init__(self):
super(ITombstonePolicy, self).__init__()
def permit(self, original_rd3=None, original_crypto=None, replacement_rd3=None, replacement_crypto=None):
        '''Executed on the original resource data together with the replacement document.
        This function should return True if the implemented policy should allow a
        tombstone to be created for the original document.'''
raise NotImplementedError("permit function must be implemented.")
def permit_burial(self, replacement_rd3=None, replacement_crypto=None, graveyard=[], existing_gravestones=[]):
'''This is executed to validate that for the specified replacement_rd3, all tombstones in
the graveyard are allowed to be buried. Use this to implement a specific node policy.
Return True to permit tombstone persistence and replacement_rd3 persistence, False otherwise.'''
raise NotImplementedError("permit_burial function must be implemented")
ITombstonePolicy.ID = "Tombstone Policy"
| {
"content_hash": "9e49d80eeb2ee3794935d2b41d36731e",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 114,
"avg_line_length": 46.2,
"alnum_prop": 0.729004329004329,
"repo_name": "jimklo/LearningRegistry",
"id": "55a6794560d1fddfce0e705a2f6f1efc9c4e03ee",
"size": "1233",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "LR/lr/plugins/tombstones.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "22994"
},
{
"name": "CSS",
"bytes": "7145"
},
{
"name": "Erlang",
"bytes": "27"
},
{
"name": "HTML",
"bytes": "22681"
},
{
"name": "Java",
"bytes": "192982"
},
{
"name": "JavaScript",
"bytes": "1200926"
},
{
"name": "Mako",
"bytes": "9795"
},
{
"name": "Nginx",
"bytes": "6783"
},
{
"name": "PHP",
"bytes": "7227"
},
{
"name": "Perl",
"bytes": "27772"
},
{
"name": "Prolog",
"bytes": "1444"
},
{
"name": "Python",
"bytes": "876116"
},
{
"name": "Ruby",
"bytes": "50"
},
{
"name": "Shell",
"bytes": "21953"
}
],
"symlink_target": ""
} |
"""
Example using MySQL Connector/Python showing:
* using warnings
"""
import mysql.connector
STMT = "SELECT 'abc'+1"
def main(config):
output = []
config['get_warnings'] = True
db = mysql.connector.Connect(**config)
cursor = db.cursor()
db.sql_mode = ''
output.append("Executing '%s'" % STMT)
cursor.execute(STMT)
cursor.fetchall()
warnings = cursor.fetchwarnings()
if warnings:
for w in warnings:
output.append("%d: %s" % (w[1],w[2]))
else:
output.append("We should have got warnings.")
raise Exception("Got no warnings")
cursor.close()
db.close()
return output
if __name__ == '__main__':
config = {
'host': 'localhost',
'port': 3306,
'database': 'test',
'user': 'root',
'password': '',
'charset': 'utf8',
'use_unicode': True,
'get_warnings': True,
}
out = main(config)
print('\n'.join(out))
| {
"content_hash": "ccf380655bb993cdf396abdb3affc73b",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 53,
"avg_line_length": 19.352941176470587,
"alnum_prop": 0.5379939209726444,
"repo_name": "loicbaron/nutrition",
"id": "b0c651aa0f7d7683e6d860638331f8f1dac934d6",
"size": "2164",
"binary": false,
"copies": "15",
"ref": "refs/heads/master",
"path": "import_data/mysql-connector-python-2.1.6/examples/mysql_warnings.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Blade",
"bytes": "40124"
},
{
"name": "C",
"bytes": "113252"
},
{
"name": "C++",
"bytes": "1447"
},
{
"name": "Dockerfile",
"bytes": "1652"
},
{
"name": "HTML",
"bytes": "1617"
},
{
"name": "JavaScript",
"bytes": "175722"
},
{
"name": "PHP",
"bytes": "21025"
},
{
"name": "Python",
"bytes": "1765772"
},
{
"name": "Shell",
"bytes": "2890"
}
],
"symlink_target": ""
} |
from FRS.config._cfg import ConfigValue, is_type
class names:
PUBLIC_TEAMS = ConfigValue('teampub', condition=is_type(str)) # type: ConfigValue[str]
PUBLIC_EVENTS = ConfigValue('eventpub', condition=is_type(str)) # type: ConfigValue[str]
class schemas:
PUBLIC_TEAMS_SCHEMA = ConfigValue('PublicTeam', condition=is_type(str)) # type: ConfigValue[str]
PUBLIC_EVENTS_SCHEMA = ConfigValue('PublicEvent', condition=is_type(str)) # type: ConfigValue[str]
| {
"content_hash": "29c4b5c6693bcc8d65d18863eb0a3e82",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 104,
"avg_line_length": 54.111111111111114,
"alnum_prop": 0.704312114989733,
"repo_name": "FRC-RS/FRS",
"id": "c94b948428ed0b0e44538f66ec210f6f5c44338e",
"size": "487",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "FRS/config/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5999"
},
{
"name": "HTML",
"bytes": "23368"
},
{
"name": "JavaScript",
"bytes": "7937"
},
{
"name": "Python",
"bytes": "300765"
},
{
"name": "TypeScript",
"bytes": "9547"
}
],
"symlink_target": ""
} |
"""
pylibftdi - python wrapper for libftdi
Copyright (c) 2010-2014 Ben Bass <[email protected]>
See LICENSE file for details and (absence of) warranty
pylibftdi: http://bitbucket.org/codedstructure/pylibftdi
This module contains some basic tests for the higher-level
functionality without requiring an actual hardware device
to be attached.
"""
from tests.test_common import (LoopDevice, CallCheckMixin, unittest)
from pylibftdi.device import Device
from pylibftdi import FtdiError
# and now some test cases...
class DeviceFunctions(CallCheckMixin, unittest.TestCase):
def testContextManager(self):
def _():
with Device():
pass
self.assertCallsExact(_,
['ftdi_init', 'ftdi_usb_open_desc_index', 'ftdi_set_bitmode',
'ftdi_setflowctrl', 'ftdi_set_baudrate',
'ftdi_usb_close', 'ftdi_deinit'])
def testOpen(self):
# a lazy_open open() shouldn't do anything
self.assertCallsExact(lambda: Device(lazy_open=True), [])
# a non-lazy_open open() should open the port...
self.assertCalls(lambda: Device(), 'ftdi_usb_open_desc_index')
# should be the same with device_id...
self.assertCalls(lambda: Device('bogus'), 'ftdi_usb_open_desc_index')
# should be the same with device_id...
self.assertCalls(lambda: Device(device_index=2),
'ftdi_usb_open_desc_index')
def testOpenInterface(self):
self.assertCalls(lambda: Device(interface_select=1),
'ftdi_set_interface')
# check that opening a specific interface does that
self.assertNotCalls(lambda: Device(), 'ftdi_set_interface')
def testReadWrite(self):
with Device() as dev:
self.assertCalls(lambda: dev.write('xxx'), 'ftdi_write_data')
self.assertCalls(lambda: dev.read(10), 'ftdi_read_data')
def testFlush(self):
with Device() as dev:
self.assertCalls(dev.flush_input, 'ftdi_usb_purge_rx_buffer')
self.assertCalls(dev.flush_output, 'ftdi_usb_purge_tx_buffer')
self.assertCalls(dev.flush, 'ftdi_usb_purge_buffers')
def testClose(self):
d = Device()
d.close()
self.assertRaises(FtdiError, d.write, 'hello')
d = Device()
d.close()
self.assertRaises(FtdiError, d.read, 1)
class LoopbackTest(unittest.TestCase):
"""
these all require mode='t' to pass in Python3
"""
def testPrint(self):
d = LoopDevice(mode='t')
d.write('Hello')
d.write(' World\n')
d.write('Bye')
self.assertEqual(d.readline(), 'Hello World\n')
self.assertEqual(d.readline(), 'Bye')
def testLines(self):
d = LoopDevice(mode='t')
lines = ['Hello\n', 'World\n', 'And\n', 'Goodbye\n']
d.writelines(lines)
self.assertEqual(d.readlines(), lines)
def testIterate(self):
d = LoopDevice(mode='t')
lines = ['Hello\n', 'World\n', 'And\n', 'Goodbye\n']
d.writelines(lines)
for idx, line in enumerate(d):
self.assertEqual(line, lines[idx])
def testBuffer(self):
d = LoopDevice(mode='t', chunk_size=3)
d.write('Hello')
d.write(' World\n')
d.write('Bye')
self.assertEqual(d.readline(), 'Hello World\n')
self.assertEqual(d.readline(), 'Bye')
if __name__ == "__main__":
unittest.main()
| {
"content_hash": "640c350b96ea7c932de1ec00f211274c",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 77,
"avg_line_length": 33.095238095238095,
"alnum_prop": 0.6103597122302158,
"repo_name": "claudyus/pylibftdi",
"id": "ae3b68db215768ff25175436a8c00251372e3581",
"size": "3475",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_device.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "71273"
}
],
"symlink_target": ""
} |
from dry_rest_permissions.generics import DRYPermissionFiltersBase
from django.db.models import Q
from apps.registration.models import Assignment
from .models import Appearance
from .models import Round
class AppearanceFilterBackend(DRYPermissionFiltersBase):
def filter_list_queryset(self, request, queryset, view):
"""
Limits all list requests to only be seen by the owners or creators.
"""
if request.user.is_staff:
return queryset
queryset = queryset.filter(
Q(
round__status=Round.STATUS.completed,
) |
Q(
round__session__convention__assignments__person__user=request.user,
round__session__convention__assignments__status__gt=0,
round__session__convention__assignments__category__lte=Assignment.CATEGORY.adm,
)
).distinct()
return queryset
class OutcomeFilterBackend(DRYPermissionFiltersBase):
def filter_list_queryset(self, request, queryset, view):
"""
Limits all list requests to only be seen by the owners or creators.
"""
if request.user.is_staff:
return queryset
queryset = queryset.filter(
Q(
round__status=Round.STATUS.completed,
) |
Q(
round__session__convention__assignments__person__user=request.user,
round__session__convention__assignments__status__gt=0,
round__session__convention__assignments__category__lte=Assignment.CATEGORY.adm,
)
).distinct()
return queryset
class ScoreFilterBackend(DRYPermissionFiltersBase):
def filter_list_queryset(self, request, queryset, view):
"""
Limits all list requests to only be seen by the owners or creators.
"""
if request.user.is_staff:
return queryset
queryset = queryset.filter(
# Assigned DRCJs and CAs can always see
Q(
song__appearance__round__session__convention__assignments__person__user=request.user,
song__appearance__round__session__convention__assignments__status__gt=0,
song__appearance__round__session__convention__assignments__category__lte=Assignment.CATEGORY.adm,
) |
# Panelists can see their own scores
Q(
panelist__person__user=request.user,
panelist__status__gt=0,
) |
# Panelists can see others' scores if Appearance is complete.
Q(
song__appearance__round__panelists__person__user=request.user,
song__appearance__round__panelists__status__gt=0,
song__appearance__status__lte=Appearance.STATUS.completed,
) |
# Group members can see their own scores if complete.
Q(
song__appearance__group__members__person__user=request.user,
song__appearance__group__members__status__gt=0,
song__appearance__status__lte=Appearance.STATUS.completed,
)
).distinct()
return queryset
class SongFilterBackend(DRYPermissionFiltersBase):
def filter_list_queryset(self, request, queryset, view):
"""
Limits all list requests to only be seen by the owners or creators.
"""
if request.user.is_staff:
return queryset
queryset = queryset.filter(
Q(
appearance__round__status=Round.STATUS.completed,
) |
Q(
appearance__round__session__convention__assignments__person__user=request.user,
appearance__round__session__convention__assignments__status__gt=0,
appearance__round__session__convention__assignments__category__lte=Assignment.CATEGORY.adm,
)
).distinct()
        return queryset
| {
"content_hash": "213e4acb3da3096aac99040b8d772eba",
"timestamp": "",
"source": "github",
"line_count": 105,
"max_line_length": 113,
"avg_line_length": 37.95238095238095,
"alnum_prop": 0.5927227101631116,
"repo_name": "barberscore/barberscore-api",
"id": "70ca6de7692830a4cadb71ef43b934a17720039d",
"size": "3985",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/apps/adjudication/filterbackends.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "HTML",
"bytes": "54125"
},
{
"name": "JavaScript",
"bytes": "5861"
},
{
"name": "Procfile",
"bytes": "114"
},
{
"name": "Python",
"bytes": "766540"
},
{
"name": "Ruby",
"bytes": "456"
}
],
"symlink_target": ""
} |
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import Queue
from pants.pantsd.service.pants_service import PantsService
class SchedulerService(PantsService):
"""The pantsd scheduler service.
This service holds an online Scheduler instance that is primed via watchman filesystem events.
This provides for a quick fork of pants runs (via the pailgun) with a fully primed ProductGraph
in memory.
"""
def __init__(self, fs_event_service, legacy_graph_helper):
"""
:param FSEventService fs_event_service: An unstarted FSEventService instance for setting up
filesystem event handlers.
:param LegacyGraphHelper legacy_graph_helper: The LegacyGraphHelper instance for graph
construction.
"""
super(SchedulerService, self).__init__()
self._fs_event_service = fs_event_service
self._graph_helper = legacy_graph_helper
self._scheduler = legacy_graph_helper.scheduler
self._engine = legacy_graph_helper.engine
self._logger = logging.getLogger(__name__)
self._event_queue = Queue.Queue(maxsize=64)
def setup(self):
"""Service setup."""
# Register filesystem event handlers on an FSEventService instance.
self._fs_event_service.register_all_files_handler(self._enqueue_fs_event)
# Start the engine.
self._engine.start()
def _enqueue_fs_event(self, event):
"""Watchman filesystem event handler for BUILD/requirements.txt updates. Called via a thread."""
self._logger.info('enqueuing {} changes for subscription {}'
.format(len(event['files']), event['subscription']))
self._event_queue.put(event)
def _handle_batch_event(self, files):
self._logger.debug('handling change event for: %s', files)
if not self._scheduler:
self._logger.debug('no scheduler. ignoring event.')
return
self._scheduler.invalidate_files(files)
def _process_event_queue(self):
"""File event notification queue processor."""
try:
event = self._event_queue.get(timeout=1)
except Queue.Empty:
return
try:
subscription, is_initial_event, files = (event['subscription'],
event['is_fresh_instance'],
[f.decode('utf-8') for f in event['files']])
except (KeyError, UnicodeDecodeError) as e:
self._logger.warn('%r raised by invalid watchman event: %s', e, event)
return
self._logger.debug('processing {} files for subscription {} (first_event={})'
.format(len(files), subscription, is_initial_event))
if not is_initial_event: # Ignore the initial all files event from watchman.
self._handle_batch_event(files)
self._event_queue.task_done()
def get_build_graph(self, spec_roots):
"""Returns a legacy BuildGraph given a set of input specs."""
return self._graph_helper.create_graph(spec_roots)
def run(self):
"""Main service entrypoint."""
while not self.is_killed:
self._process_event_queue()
def terminate(self):
"""An extension of PantsService.terminate() that tears down the engine."""
self._engine.close()
super(SchedulerService, self).terminate()
| {
"content_hash": "4545c00c41699ec02176034017e250d6",
"timestamp": "",
"source": "github",
"line_count": 90,
"max_line_length": 100,
"avg_line_length": 37.77777777777778,
"alnum_prop": 0.6505882352941177,
"repo_name": "gmalmquist/pants",
"id": "86a561aa027f5efe38baea12fb9b59d5a17ffb0f",
"size": "3547",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python/pants/pantsd/service/scheduler_service.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "Cucumber",
"bytes": "919"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Go",
"bytes": "1746"
},
{
"name": "HTML",
"bytes": "79866"
},
{
"name": "Java",
"bytes": "437330"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "5053630"
},
{
"name": "Scala",
"bytes": "84585"
},
{
"name": "Shell",
"bytes": "58748"
},
{
"name": "Thrift",
"bytes": "1966"
}
],
"symlink_target": ""
} |
"""
Demo implementation of the media player.
For more details about this platform, please refer to the documentation
https://home-assistant.io/components/demo/
"""
import homeassistant.util.dt as dt_util
from homeassistant.components.media_player import (
MediaPlayerDevice)
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MOVIE, MEDIA_TYPE_MUSIC, MEDIA_TYPE_TVSHOW,
SUPPORT_CLEAR_PLAYLIST, SUPPORT_NEXT_TRACK, SUPPORT_PAUSE, SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA, SUPPORT_PREVIOUS_TRACK, SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE, SUPPORT_SHUFFLE_SET, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET)
from homeassistant.const import STATE_OFF, STATE_PAUSED, STATE_PLAYING
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the media player demo platform."""
add_entities([
DemoYoutubePlayer(
'Living Room', 'eyU3bRy2x44',
'♥♥ The Best Fireplace Video (3 hours)', 300),
DemoYoutubePlayer(
'Bedroom', 'kxopViU98Xo', 'Epic sax guy 10 hours', 360000),
DemoMusicPlayer(), DemoTVShowPlayer(),
])
YOUTUBE_COVER_URL_FORMAT = 'https://img.youtube.com/vi/{}/hqdefault.jpg'
SOUND_MODE_LIST = ['Dummy Music', 'Dummy Movie']
DEFAULT_SOUND_MODE = 'Dummy Music'
YOUTUBE_PLAYER_SUPPORT = \
SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_PLAY_MEDIA | SUPPORT_PLAY | \
SUPPORT_SHUFFLE_SET | SUPPORT_SELECT_SOUND_MODE | SUPPORT_SELECT_SOURCE
MUSIC_PLAYER_SUPPORT = \
SUPPORT_PAUSE | SUPPORT_VOLUME_SET | SUPPORT_VOLUME_MUTE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_CLEAR_PLAYLIST | \
SUPPORT_PLAY | SUPPORT_SHUFFLE_SET | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_SELECT_SOUND_MODE
NETFLIX_PLAYER_SUPPORT = \
SUPPORT_PAUSE | SUPPORT_TURN_ON | SUPPORT_TURN_OFF | \
SUPPORT_SELECT_SOURCE | SUPPORT_PLAY | SUPPORT_SHUFFLE_SET | \
SUPPORT_PREVIOUS_TRACK | SUPPORT_NEXT_TRACK | \
SUPPORT_SELECT_SOUND_MODE
class AbstractDemoPlayer(MediaPlayerDevice):
"""A demo media players."""
# We only implement the methods that we support
def __init__(self, name):
"""Initialize the demo device."""
self._name = name
self._player_state = STATE_PLAYING
self._volume_level = 1.0
self._volume_muted = False
self._shuffle = False
self._sound_mode_list = SOUND_MODE_LIST
self._sound_mode = DEFAULT_SOUND_MODE
@property
def should_poll(self):
"""Push an update after each command."""
return False
@property
def name(self):
"""Return the name of the media player."""
return self._name
@property
def state(self):
"""Return the state of the player."""
return self._player_state
@property
def volume_level(self):
"""Return the volume level of the media player (0..1)."""
return self._volume_level
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
return self._volume_muted
@property
def shuffle(self):
"""Boolean if shuffling is enabled."""
return self._shuffle
@property
def sound_mode(self):
"""Return the current sound mode."""
return self._sound_mode
@property
def sound_mode_list(self):
"""Return a list of available sound modes."""
return self._sound_mode_list
def turn_on(self):
"""Turn the media player on."""
self._player_state = STATE_PLAYING
self.schedule_update_ha_state()
def turn_off(self):
"""Turn the media player off."""
self._player_state = STATE_OFF
self.schedule_update_ha_state()
def mute_volume(self, mute):
"""Mute the volume."""
self._volume_muted = mute
self.schedule_update_ha_state()
def set_volume_level(self, volume):
"""Set the volume level, range 0..1."""
self._volume_level = volume
self.schedule_update_ha_state()
def media_play(self):
"""Send play command."""
self._player_state = STATE_PLAYING
self.schedule_update_ha_state()
def media_pause(self):
"""Send pause command."""
self._player_state = STATE_PAUSED
self.schedule_update_ha_state()
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
self._shuffle = shuffle
self.schedule_update_ha_state()
def select_sound_mode(self, sound_mode):
"""Select sound mode."""
self._sound_mode = sound_mode
self.schedule_update_ha_state()
class DemoYoutubePlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
def __init__(self, name, youtube_id=None, media_title=None, duration=360):
"""Initialize the demo device."""
super().__init__(name)
self.youtube_id = youtube_id
self._media_title = media_title
self._duration = duration
self._progress = int(duration * .15)
self._progress_updated_at = dt_util.utcnow()
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self.youtube_id
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MOVIE
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self._duration
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return YOUTUBE_COVER_URL_FORMAT.format(self.youtube_id)
@property
def media_title(self):
"""Return the title of current playing media."""
return self._media_title
@property
def app_name(self):
"""Return the current running application."""
return "YouTube"
@property
def supported_features(self):
"""Flag media player features that are supported."""
return YOUTUBE_PLAYER_SUPPORT
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._progress is None:
return None
position = self._progress
if self._player_state == STATE_PLAYING:
position += (dt_util.utcnow() -
self._progress_updated_at).total_seconds()
return position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
if self._player_state == STATE_PLAYING:
return self._progress_updated_at
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
self.youtube_id = media_id
self.schedule_update_ha_state()
def media_pause(self):
"""Send pause command."""
self._progress = self.media_position
self._progress_updated_at = dt_util.utcnow()
super().media_pause()
class DemoMusicPlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
tracks = [
('Technohead', 'I Wanna Be A Hippy (Flamman & Abraxas Radio Mix)'),
('Paul Elstak', 'Luv U More'),
('Dune', 'Hardcore Vibes'),
('Nakatomi', 'Children Of The Night'),
('Party Animals',
'Have You Ever Been Mellow? (Flamman & Abraxas Radio Mix)'),
('Rob G.*', 'Ecstasy, You Got What I Need'),
('Lipstick', "I'm A Raver"),
('4 Tune Fairytales', 'My Little Fantasy (Radio Edit)'),
('Prophet', "The Big Boys Don't Cry"),
('Lovechild', 'All Out Of Love (DJ Weirdo & Sim Remix)'),
('Stingray & Sonic Driver', 'Cold As Ice (El Bruto Remix)'),
('Highlander', 'Hold Me Now (Bass-D & King Matthew Remix)'),
('Juggernaut', 'Ruffneck Rules Da Artcore Scene (12" Edit)'),
('Diss Reaction', 'Jiiieehaaaa '),
('Flamman And Abraxas', 'Good To Go (Radio Mix)'),
('Critical Mass', 'Dancing Together'),
('Charly Lownoise & Mental Theo',
'Ultimate Sex Track (Bass-D & King Matthew Remix)'),
]
def __init__(self):
"""Initialize the demo device."""
super().__init__('Walkman')
self._cur_track = 0
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return 'bounzz-1'
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return 213
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return 'https://graph.facebook.com/v2.5/107771475912710/' \
'picture?type=large'
@property
def media_title(self):
"""Return the title of current playing media."""
return self.tracks[self._cur_track][1] if self.tracks else ""
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self.tracks[self._cur_track][0] if self.tracks else ""
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
return "Bounzz"
@property
def media_track(self):
"""Return the track number of current media (Music track only)."""
return self._cur_track + 1
@property
def supported_features(self):
"""Flag media player features that are supported."""
return MUSIC_PLAYER_SUPPORT
def media_previous_track(self):
"""Send previous track command."""
if self._cur_track > 0:
self._cur_track -= 1
self.schedule_update_ha_state()
def media_next_track(self):
"""Send next track command."""
if self._cur_track < len(self.tracks) - 1:
self._cur_track += 1
self.schedule_update_ha_state()
def clear_playlist(self):
"""Clear players playlist."""
self.tracks = []
self._cur_track = 0
self._player_state = STATE_OFF
self.schedule_update_ha_state()
class DemoTVShowPlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
def __init__(self):
"""Initialize the demo device."""
super().__init__('Lounge room')
self._cur_episode = 1
self._episode_count = 13
self._source = 'dvd'
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return 'house-of-cards-1'
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_TVSHOW
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return 3600
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return 'https://graph.facebook.com/v2.5/HouseofCards/picture?width=400'
@property
def media_title(self):
"""Return the title of current playing media."""
return 'Chapter {}'.format(self._cur_episode)
@property
def media_series_title(self):
"""Return the series title of current playing media (TV Show only)."""
return 'House of Cards'
@property
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return 1
@property
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self._cur_episode
@property
def app_name(self):
"""Return the current running application."""
return "Netflix"
@property
def source(self):
"""Return the current input source."""
return self._source
@property
def supported_features(self):
"""Flag media player features that are supported."""
return NETFLIX_PLAYER_SUPPORT
def media_previous_track(self):
"""Send previous track command."""
if self._cur_episode > 1:
self._cur_episode -= 1
self.schedule_update_ha_state()
def media_next_track(self):
"""Send next track command."""
if self._cur_episode < self._episode_count:
self._cur_episode += 1
self.schedule_update_ha_state()
def select_source(self, source):
"""Set the input source."""
self._source = source
self.schedule_update_ha_state()
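# Hedged configuration sketch (not from this file): with the legacy platform
# setup above, these demo players would typically be enabled from
# configuration.yaml along these lines.
#
# media_player:
#   - platform: demo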
| {
"content_hash": "6421c1466ebfa85bf6fb71ecd50648d9",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 79,
"avg_line_length": 31.719806763285025,
"alnum_prop": 0.6146816935729515,
"repo_name": "HydrelioxGitHub/home-assistant",
"id": "de455879d3d5abdb25963e7f4f634dea989892a1",
"size": "13136",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/media_player/demo.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "1175"
},
{
"name": "Dockerfile",
"bytes": "1081"
},
{
"name": "Python",
"bytes": "14330009"
},
{
"name": "Ruby",
"bytes": "745"
},
{
"name": "Shell",
"bytes": "17364"
}
],
"symlink_target": ""
} |
"""Common functions for MongoDB and DB2 backends
"""
import pymongo
import ceilometer
from ceilometer.storage import base
from ceilometer.storage import models
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer import utils
COMMON_AVAILABLE_CAPABILITIES = {
'meters': {'query': {'simple': True,
'metadata': True}},
'samples': {'query': {'simple': True,
'metadata': True,
'complex': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Base Connection class for MongoDB and DB2 drivers."""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
COMMON_AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def get_meters(self, user=None, project=None, resource=None, source=None,
metaquery=None, pagination=None):
"""Return an iterable of models.Meter instances
:param user: Optional ID for user that owns the resource.
:param project: Optional ID for project that owns the resource.
:param resource: Optional resource filter.
:param source: Optional source filter.
:param metaquery: Optional dict with metadata to match on.
:param pagination: Optional pagination query.
"""
if pagination:
raise ceilometer.NotImplementedError('Pagination not implemented')
metaquery = metaquery or {}
q = {}
if user is not None:
q['user_id'] = user
if project is not None:
q['project_id'] = project
if resource is not None:
q['_id'] = resource
if source is not None:
q['source'] = source
q.update(metaquery)
for r in self.db.resource.find(q):
for r_meter in r['meter']:
yield models.Meter(
name=r_meter['counter_name'],
type=r_meter['counter_type'],
# Return empty string if 'counter_unit' is not valid for
# backward compatibility.
unit=r_meter.get('counter_unit', ''),
resource_id=r['_id'],
project_id=r['project_id'],
source=r['source'],
user_id=r['user_id'],
)
def get_samples(self, sample_filter, limit=None):
"""Return an iterable of model.Sample instances.
:param sample_filter: Filter.
:param limit: Maximum number of results to return.
"""
if limit == 0:
return []
q = pymongo_utils.make_query_from_filter(sample_filter,
require_meter=False)
return self._retrieve_samples(q,
[("timestamp", pymongo.DESCENDING)],
limit)
def query_samples(self, filter_expr=None, orderby=None, limit=None):
if limit == 0:
return []
query_filter = {}
orderby_filter = [("timestamp", pymongo.DESCENDING)]
transformer = pymongo_utils.QueryTransformer()
if orderby is not None:
orderby_filter = transformer.transform_orderby(orderby)
if filter_expr is not None:
query_filter = transformer.transform_filter(filter_expr)
return self._retrieve_samples(query_filter, orderby_filter, limit)
def _retrieve_samples(self, query, orderby, limit):
if limit is not None:
samples = self.db.meter.find(query,
limit=limit,
sort=orderby)
else:
samples = self.db.meter.find(query,
sort=orderby)
for s in samples:
# Remove the ObjectId generated by the database when
# the sample was inserted. It is an implementation
# detail that should not leak outside of the driver.
del s['_id']
# Backward compatibility for samples without units
s['counter_unit'] = s.get('counter_unit', '')
# Tolerate absence of recorded_at in older datapoints
s['recorded_at'] = s.get('recorded_at')
yield models.Sample(**s)
| {
"content_hash": "834cf508be79cd78d14f7a97562f58bf",
"timestamp": "",
"source": "github",
"line_count": 124,
"max_line_length": 78,
"avg_line_length": 36.645161290322584,
"alnum_prop": 0.5543573943661971,
"repo_name": "m1093782566/openstack_org_ceilometer",
"id": "32613e5f3adc049d7e382468b2f89f05220f717e",
"size": "5255",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "ceilometer/storage/pymongo_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "2657375"
},
{
"name": "Shell",
"bytes": "3204"
}
],
"symlink_target": ""
} |
class Channel(object):
def __init__(self, server, name, id, members=[]):
self.server = server
self.name = name
self.id = id
self.members = members
def __eq__(self, compare_str):
if self.name == compare_str or self.id == compare_str:
return True
else:
return False
def __str__(self):
data = ""
for key in self.__dict__.keys():
data += "{} : {}\n".format(key, str(self.__dict__[key])[:40])
return data
def __repr__(self):
return self.__str__()
def send_message(self, message):
message_json = {"type": "message", "channel": self.id, "text": message}
self.server.send_to_websocket(message_json)
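# Hedged usage sketch (not part of the original module). It assumes a `server`
# object that exposes send_to_websocket(), which is all Channel relies on above;
# the channel name/id values are made up.
# channel = Channel(server, name="general", id="C024BE91L", members=["U1", "U2"])
# channel == "general"                  # True: __eq__ matches on name or id
# channel.send_message("Hello, world")  # sends {"type": "message", ...} over the websocket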
| {
"content_hash": "af4435726157772da264f04f9e8a75b3",
"timestamp": "",
"source": "github",
"line_count": 26,
"max_line_length": 79,
"avg_line_length": 28.76923076923077,
"alnum_prop": 0.5200534759358288,
"repo_name": "pboennig/bellsbot",
"id": "d12586199966879c1c8cf59b808fea55d336053f",
"size": "748",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "slackclient/_channel.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Perl",
"bytes": "24"
},
{
"name": "Python",
"bytes": "69353"
}
],
"symlink_target": ""
} |
from portality.dao import DomainObject
from portality.lib import dates
class Notification(DomainObject):
"""~~Notification:Model->DomainObject:Model~~"""
__type__ = "notification"
def __init__(self, **kwargs):
super(Notification, self).__init__(**kwargs)
@property
def who(self):
return self.data.get("who")
@who.setter
def who(self, account_id):
self.data["who"] = account_id
@property
def short(self):
return self.data.get("short")
@short.setter
def short(self, message):
self.data["short"] = message
@property
def long(self):
return self.data.get("long")
@long.setter
def long(self, message):
self.data["long"] = message
@property
def seen_date(self):
return self.data.get("seen_date")
@seen_date.setter
def seen_date(self, date):
self.data["seen_date"] = date
def is_seen(self):
return "seen_date" in self.data
def set_seen(self):
self.seen_date = dates.now()
@property
def action(self):
return self.data.get("action")
@action.setter
def action(self, action_url):
self.data["action"] = action_url
@property
def classification(self):
return self.data.get("classification")
@classification.setter
def classification(self, classification):
self.data["classification"] = classification
@property
def created_by(self):
return self.data.get("created_by")
@created_by.setter
def created_by(self, consumer_id):
self.data["created_by"] = consumer_id
| {
"content_hash": "32d94cc78a2ff0b9d794de9acb68c436",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 52,
"avg_line_length": 22.36986301369863,
"alnum_prop": 0.6093080220453154,
"repo_name": "DOAJ/doaj",
"id": "cc0944b470547782e66526442de2f0c3d3ceaf6d",
"size": "1633",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "portality/models/notifications.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "2399"
},
{
"name": "Dockerfile",
"bytes": "59"
},
{
"name": "HTML",
"bytes": "483733"
},
{
"name": "JavaScript",
"bytes": "952971"
},
{
"name": "Jinja",
"bytes": "15292"
},
{
"name": "Python",
"bytes": "3195030"
},
{
"name": "SCSS",
"bytes": "75276"
},
{
"name": "Shell",
"bytes": "28415"
}
],
"symlink_target": ""
} |
# def version(self, data):
# yield from asyncio.sleep(self.delay)
# return 1
# def ping(self, data):
# yield from asyncio.sleep(self.delay)
# response = ServiceMessage()
# response.data = data
# return response
| {
"content_hash": "9832aa22841aef9b76d7de0ceb7bc9a1",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 45,
"avg_line_length": 28.666666666666668,
"alnum_prop": 0.5891472868217055,
"repo_name": "drankinn/wolverine",
"id": "c90a26eac06346454a5909914f1b4cbc9aa3b96d",
"size": "1878",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_zmq_controller.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "263"
},
{
"name": "Python",
"bytes": "73343"
},
{
"name": "Shell",
"bytes": "740"
}
],
"symlink_target": ""
} |
from copy import deepcopy
import requests
from six.moves.urllib.parse import urlparse
from datadog_checks.base.checks.openmetrics import OpenMetricsBaseCheck
from datadog_checks.base.errors import CheckException
from .metrics import METRICS_MAP
class GitlabCheck(OpenMetricsBaseCheck):
"""
    Collect Gitlab metrics from Prometheus and validate connectivity with Gitlab.
"""
# Readiness signals ability to serve traffic, liveness that Gitlab is healthy overall
ALLOWED_SERVICE_CHECKS = ['readiness', 'liveness', 'health']
EVENT_TYPE = SOURCE_TYPE_NAME = 'gitlab'
DEFAULT_CONNECT_TIMEOUT = 5
DEFAULT_RECEIVE_TIMEOUT = 15
DEFAULT_METRIC_LIMIT = 0
PROMETHEUS_SERVICE_CHECK_NAME = 'gitlab.prometheus_endpoint_up'
HTTP_CONFIG_REMAPPER = {
'receive_timeout': {'name': 'read_timeout', 'default': DEFAULT_RECEIVE_TIMEOUT},
'connect_timeout': {'name': 'connect_timeout', 'default': DEFAULT_CONNECT_TIMEOUT},
'gitlab_user': {'name': 'username'},
'gitlab_password': {'name': 'password'},
'ssl_cert_validation': {'name': 'tls_verify'},
'ssl_ca_certs': {'name': 'tls_ca_cert'},
}
def __init__(self, name, init_config, instances):
super(GitlabCheck, self).__init__(
name, init_config, [self._create_gitlab_prometheus_instance(instances[0], init_config)]
)
self.endpoint = self.instance.get('prometheus_url', self.instance.get('prometheus_endpoint'))
if self.endpoint is None:
raise CheckException("Unable to find `prometheus_url` or `prometheus_endpoint` in config file.")
self.url = self.instance.get('gitlab_url', None)
self.token = self.instance.get('api_token', None)
def check(self, _):
scraper_config = self.config_map[self.endpoint]
try:
self.process(scraper_config)
self.service_check(self.PROMETHEUS_SERVICE_CHECK_NAME, OpenMetricsBaseCheck.OK, self._tags)
except requests.exceptions.ConnectionError as e:
# Unable to connect to the metrics endpoint
self.service_check(
self.PROMETHEUS_SERVICE_CHECK_NAME,
OpenMetricsBaseCheck.CRITICAL,
message="Unable to retrieve Prometheus metrics from endpoint {}: {}".format(self.endpoint, e),
)
# Service check to check Gitlab's health endpoints
for check_type in self.ALLOWED_SERVICE_CHECKS:
self._check_health_endpoint(check_type)
self.submit_version()
def _create_gitlab_prometheus_instance(self, instance, init_config):
"""
Set up the gitlab instance so it can be used in OpenMetricsBaseCheck
"""
# Mapping from Gitlab specific Prometheus metric names to Datadog ones
metrics = [METRICS_MAP]
# Add allowed legacy metrics
metrics.extend(init_config.get('allowed_metrics', []))
gitlab_instance = deepcopy(instance)
# gitlab uses 'prometheus_endpoint' and not 'prometheus_url', so we have to rename the key
gitlab_instance['prometheus_url'] = instance.get('prometheus_url', instance.get('prometheus_endpoint'))
self._tags = self._check_tags(gitlab_instance)
gitlab_instance.update(
{
'namespace': 'gitlab',
'metrics': metrics,
# Defaults that were set when gitlab was based on PrometheusCheck
'send_distribution_counts_as_monotonic': instance.get('send_distribution_counts_as_monotonic', False),
'send_monotonic_counter': instance.get('send_monotonic_counter', False),
'health_service_check': instance.get('health_service_check', False),
'tags': self._tags,
}
)
return gitlab_instance
def _check_tags(self, instance):
custom_tags = instance.get('tags', [])
url = instance.get('gitlab_url')
# creating tags for host and port
parsed_url = urlparse(url)
gitlab_host = parsed_url.hostname
gitlab_port = 443 if parsed_url.scheme == 'https' else (parsed_url.port or 80)
return ['gitlab_host:{}'.format(gitlab_host), 'gitlab_port:{}'.format(gitlab_port)] + custom_tags
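    # Worked example of the tagging above (derived from the parsing logic):
    # gitlab_url "https://gitlab.example.com" yields
    # ['gitlab_host:gitlab.example.com', 'gitlab_port:443'] plus any custom tags.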
def submit_version(self):
if not self.is_metadata_collection_enabled():
return
try:
if self.token is None:
self.log.debug(
"Gitlab token not found; please add one in your config to enable version metadata collection."
)
return
param = {'access_token': self.token}
response = self.http.get("{}/api/v4/version".format(self.url), params=param)
version = response.json().get('version')
self.set_metadata('version', version)
self.log.debug("Set version %s for Gitlab", version)
except Exception as e:
self.log.warning("Gitlab version metadata not collected: %s", e)
# Validates an health endpoint
#
# Valid endpoints are:
# - /-/readiness
# - /-/liveness
# - /-/health
#
# https://docs.gitlab.com/ce/user/admin_area/monitoring/health_check.html
def _check_health_endpoint(self, check_type):
if check_type not in self.ALLOWED_SERVICE_CHECKS:
raise CheckException("Health endpoint {} is not a valid endpoint".format(check_type))
if self.url is None:
# Simply ignore this service check if not configured
self.log.debug("gitlab_url not configured, service check %s skipped", check_type)
return
# These define which endpoint is hit and which type of check is actually performed
# TODO: parse errors and report for single sub-service failure?
service_check_name = 'gitlab.{}'.format(check_type)
check_url = '{}/-/{}'.format(self.url, check_type)
try:
self.log.debug("checking %s against %s", check_type, check_url)
r = self.http.get(check_url)
if r.status_code != 200:
self.service_check(
service_check_name,
OpenMetricsBaseCheck.CRITICAL,
message="Got {} when hitting {}".format(r.status_code, check_url),
tags=self._tags,
)
raise Exception("Http status code {} on check_url {}".format(r.status_code, check_url))
else:
r.raise_for_status()
except requests.exceptions.Timeout:
# If there's a timeout
self.service_check(
service_check_name,
OpenMetricsBaseCheck.CRITICAL,
message="Timeout when hitting {}".format(check_url),
tags=self._tags,
)
raise
except Exception as e:
self.service_check(
service_check_name,
OpenMetricsBaseCheck.CRITICAL,
message="Error hitting {}. Error: {}".format(check_url, e),
tags=self._tags,
)
raise
else:
self.service_check(service_check_name, OpenMetricsBaseCheck.OK, self._tags)
self.log.debug("gitlab check %s succeeded", check_type)
| {
"content_hash": "09743089a5cd1b6ddac783e18008a4ad",
"timestamp": "",
"source": "github",
"line_count": 182,
"max_line_length": 118,
"avg_line_length": 40.252747252747255,
"alnum_prop": 0.6048321048321048,
"repo_name": "DataDog/integrations-core",
"id": "eddece214435cca4c220012bfedc3380c20d9693",
"size": "7441",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gitlab/datadog_checks/gitlab/gitlab.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "578"
},
{
"name": "COBOL",
"bytes": "12312"
},
{
"name": "Dockerfile",
"bytes": "22998"
},
{
"name": "Erlang",
"bytes": "15518"
},
{
"name": "Go",
"bytes": "6988"
},
{
"name": "HCL",
"bytes": "4080"
},
{
"name": "HTML",
"bytes": "1318"
},
{
"name": "JavaScript",
"bytes": "1817"
},
{
"name": "Kotlin",
"bytes": "430"
},
{
"name": "Lua",
"bytes": "3489"
},
{
"name": "PHP",
"bytes": "20"
},
{
"name": "PowerShell",
"bytes": "2398"
},
{
"name": "Python",
"bytes": "13020828"
},
{
"name": "Roff",
"bytes": "359"
},
{
"name": "Ruby",
"bytes": "241"
},
{
"name": "Scala",
"bytes": "7000"
},
{
"name": "Shell",
"bytes": "83227"
},
{
"name": "Swift",
"bytes": "203"
},
{
"name": "TSQL",
"bytes": "29972"
},
{
"name": "TypeScript",
"bytes": "1019"
}
],
"symlink_target": ""
} |
import time
import atexit
import pigpio
class sensor:
"""
A class to read relative humidity and temperature from the
DHT22 sensor. The sensor is also known as the AM2302.
The sensor can be powered from the Pi 3V3 or the Pi 5V rail.
Powering from the 3V3 rail is simpler and safer. You may need
to power from 5V if the sensor is connected via a long cable.
For 3V3 operation connect pin 1 to 3V3 and pin 4 to ground.
Connect pin 2 to a gpio.
For 5V operation connect pin 1 to 5V and pin 4 to ground.
The following pin 2 connection works for me. Use at YOUR OWN RISK.
5V--5K_resistor--+--10K_resistor--Ground
|
DHT22 pin 2 -----+
|
gpio ------------+
"""
def __init__(self, pi, gpio, LED=None, power=None):
"""
Instantiate with the Pi and gpio to which the DHT22 output
pin is connected.
Optionally a LED may be specified. This will be blinked for
each successful reading.
Optionally a gpio used to power the sensor may be specified.
This gpio will be set high to power the sensor. If the sensor
locks it will be power cycled to restart the readings.
Taking readings more often than about once every two seconds will
eventually cause the DHT22 to hang. A 3 second interval seems OK.
"""
self.pi = pi
self.gpio = gpio
self.LED = LED
self.power = power
if power is not None:
pi.write(power, 1) # Switch sensor on.
time.sleep(2)
self.powered = True
self.cb = None
atexit.register(self.cancel)
self.bad_CS = 0 # Bad checksum count.
self.bad_SM = 0 # Short message count.
self.bad_MM = 0 # Missing message count.
self.bad_SR = 0 # Sensor reset count.
# Power cycle if timeout > MAX_TIMEOUTS.
self.no_response = 0
self.MAX_NO_RESPONSE = 2
self.rhum = -999
self.temp = -999
self.tov = None
self.high_tick = 0
self.bit = 40
pi.set_pull_up_down(gpio, pigpio.PUD_OFF)
pi.set_watchdog(gpio, 0) # Kill any watchdogs.
self.cb = pi.callback(gpio, pigpio.EITHER_EDGE, self._cb)
def _cb(self, gpio, level, tick):
"""
Accumulate the 40 data bits. Format into 5 bytes, humidity high,
humidity low, temperature high, temperature low, checksum.
"""
diff = pigpio.tickDiff(self.high_tick, tick)
if level == 0:
# Edge length determines if bit is 1 or 0.
if diff >= 50:
val = 1
if diff >= 200: # Bad bit?
self.CS = 256 # Force bad checksum.
else:
val = 0
if self.bit >= 40: # Message complete.
self.bit = 40
elif self.bit >= 32: # In checksum byte.
self.CS = (self.CS<<1) + val
if self.bit == 39:
# 40th bit received.
self.pi.set_watchdog(self.gpio, 0)
self.no_response = 0
total = self.hH + self.hL + self.tH + self.tL
if (total & 255) == self.CS: # Is checksum ok?
self.rhum = ((self.hH<<8) + self.hL) * 0.1
if self.tH & 128: # Negative temperature.
mult = -0.1
self.tH = self.tH & 127
else:
mult = 0.1
self.temp = ((self.tH<<8) + self.tL) * mult
self.tov = time.time()
if self.LED is not None:
self.pi.write(self.LED, 0)
else:
self.bad_CS += 1
elif self.bit >=24: # in temp low byte
self.tL = (self.tL<<1) + val
elif self.bit >=16: # in temp high byte
self.tH = (self.tH<<1) + val
elif self.bit >= 8: # in humidity low byte
self.hL = (self.hL<<1) + val
elif self.bit >= 0: # in humidity high byte
self.hH = (self.hH<<1) + val
else: # header bits
pass
self.bit += 1
elif level == 1:
self.high_tick = tick
if diff > 250000:
self.bit = -2
self.hH = 0
self.hL = 0
self.tH = 0
self.tL = 0
self.CS = 0
else: # level == pigpio.TIMEOUT:
self.pi.set_watchdog(self.gpio, 0)
if self.bit < 8: # Too few data bits received.
self.bad_MM += 1 # Bump missing message count.
self.no_response += 1
if self.no_response > self.MAX_NO_RESPONSE:
self.no_response = 0
self.bad_SR += 1 # Bump sensor reset count.
if self.power is not None:
self.powered = False
self.pi.write(self.power, 0)
time.sleep(2)
self.pi.write(self.power, 1)
time.sleep(2)
self.powered = True
         elif self.bit < 39: # Short message received.
self.bad_SM += 1 # Bump short message count.
self.no_response = 0
else: # Full message received.
self.no_response = 0
def temperature(self):
"""Return current temperature."""
return self.temp
def humidity(self):
"""Return current relative humidity."""
return self.rhum
def staleness(self):
"""Return time since measurement made."""
if self.tov is not None:
return time.time() - self.tov
else:
return -999
def bad_checksum(self):
"""Return count of messages received with bad checksums."""
return self.bad_CS
def short_message(self):
"""Return count of short messages."""
return self.bad_SM
def missing_message(self):
"""Return count of missing messages."""
return self.bad_MM
def sensor_resets(self):
"""Return count of power cycles because of sensor hangs."""
return self.bad_SR
def trigger(self):
"""Trigger a new relative humidity and temperature reading."""
if self.powered:
if self.LED is not None:
self.pi.write(self.LED, 1)
self.pi.write(self.gpio, pigpio.LOW)
time.sleep(0.017) # 17 ms
self.pi.set_mode(self.gpio, pigpio.INPUT)
self.pi.set_watchdog(self.gpio, 200)
def cancel(self):
"""Cancel the DHT22 sensor."""
self.pi.set_watchdog(self.gpio, 0)
if self.cb != None:
self.cb.cancel()
self.cb = None
if __name__ == "__main__":
import time
import pigpio
import DHT22
# Intervals of about 2 seconds or less will eventually hang the DHT22.
INTERVAL=3
pi = pigpio.pi()
s = DHT22.sensor(pi, 22, LED=16, power=8)
r = 0
next_reading = time.time()
while True:
r += 1
s.trigger()
time.sleep(0.2)
print("{} {} {} {:3.2f} {} {} {} {}".format(
r, s.humidity(), s.temperature(), s.staleness(),
s.bad_checksum(), s.short_message(), s.missing_message(),
s.sensor_resets()))
next_reading += INTERVAL
time.sleep(next_reading-time.time()) # Overall INTERVAL second polling.
s.cancel()
pi.stop()
| {
"content_hash": "99930f550d662f3b58c63833519d005e",
"timestamp": "",
"source": "github",
"line_count": 279,
"max_line_length": 77,
"avg_line_length": 26.121863799283155,
"alnum_prop": 0.5362239297475302,
"repo_name": "ninjawil/weather-station",
"id": "8a10f1742d79029372c787f19d7bf7a43c6761ac",
"size": "7334",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "scripts/DHT22.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "9572"
},
{
"name": "C++",
"bytes": "474"
},
{
"name": "CSS",
"bytes": "3344"
},
{
"name": "HTML",
"bytes": "21038"
},
{
"name": "JavaScript",
"bytes": "81387"
},
{
"name": "PHP",
"bytes": "1045"
},
{
"name": "Python",
"bytes": "172598"
},
{
"name": "Shell",
"bytes": "289"
}
],
"symlink_target": ""
} |
"""Constants for the P1 Monitor integration."""
from __future__ import annotations
from datetime import timedelta
import logging
from typing import Final
DOMAIN: Final = "p1_monitor"
LOGGER = logging.getLogger(__package__)
SCAN_INTERVAL = timedelta(seconds=5)
SERVICE_SMARTMETER: Final = "smartmeter"
SERVICE_PHASES: Final = "phases"
SERVICE_SETTINGS: Final = "settings"
SERVICES: dict[str, str] = {
SERVICE_SMARTMETER: "SmartMeter",
SERVICE_PHASES: "Phases",
SERVICE_SETTINGS: "Settings",
}
| {
"content_hash": "178ef7d15f5331ac938d73899e676b84",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 47,
"avg_line_length": 25.4,
"alnum_prop": 0.7322834645669292,
"repo_name": "jawilson/home-assistant",
"id": "d72927a80f63da64c7850caa75ca5434d46c3163",
"size": "508",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "homeassistant/components/p1_monitor/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2782"
},
{
"name": "Python",
"bytes": "40129467"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
} |
import datetime
import dateutil.parser
from pixiv_pixie.utils import LazyProperty
from .constants import IllustType, IllustAgeLimit
def _lazy(attr_name):
def get_func(self):
self.update()
return getattr(self, attr_name)
return LazyProperty(get_func, property_name=attr_name)
class PixivIllust:
"""Pixiv Illust object.
Used to access illust info.
Attributes:
illust_id: Illust ID.
title: Title.
        caption: Some description text. May contain HTML tags or escape
            characters.
creation_time: A datetime object.
width: Width.
height: Height.
image_urls: A list of original image urls. A ugoira's image_urls will
only contains one URL of a ZIP file which contains all frames.
        frame_delays: None for a non-ugoira illust, or a list of frame delay
            durations in milliseconds.
type: Illust type. Will be ILLUST, MANGA or UGOIRA. (These constants are
defined in pixiv_pixie.constants.illust.)
age_limit: Age limitation type. Will be ALL_AGE, R18 or R18G. (These
constants are defined in pixiv_pixie.constants.illust.)
tags: Tags.
tools: Tools used be the author.
user_account: The author's account name.
user_id: The author's user ID.
user_name: The author's nickname.
        total_bookmarks: The number of bookmarks on this illust.
        total_view: The number of times this illust has been viewed.
        rank: Ranking number of the illust. Only makes sense when the illust was
            fetched from a ranking. Starting from 1.
"""
title = _lazy('title')
caption = _lazy('caption')
creation_time = _lazy('creation_time')
width = _lazy('width')
height = _lazy('height')
image_urls = _lazy('image_urls')
frame_delays = _lazy('frame_delays')
type = _lazy('type')
age_limit = _lazy('age_limit')
tags = _lazy('tags')
tools = _lazy('tools')
user_account = _lazy('user_account')
user_id = _lazy('user_id')
user_name = _lazy('user_name')
total_bookmarks = _lazy('total_bookmarks')
total_view = _lazy('total_view')
@classmethod
def from_papi(cls, pixie, json_result):
illust = cls(pixie=pixie, illust_id=json_result.id)
illust.update_from_papi(json_result)
return illust
@classmethod
def from_aapi(cls, pixie, json_result):
illust = cls(pixie=pixie, illust_id=json_result.id)
illust.update_from_aapi(json_result)
return illust
def __init__(self, pixie, illust_id):
self.pixie = pixie
self.illust_id = illust_id
self.rank = None
def __repr__(self):
return 'PixivIllust(illust_id={})'.format(self.illust_id)
@property
def size(self):
"""A tuple of (width, height)."""
return self.width, self.height
@property
def area(self):
"""Area in pixels."""
return self.width * self.height
@property
def aspect_ratio(self):
"""Width divided by height."""
if self.height == 0:
return 0
return self.width / self.height
@property
def page_count(self):
"""The number of pages."""
return len(self.image_urls)
def update(self):
illust = self.pixie.illust(self.illust_id)
attributes = [
'illust_id',
'title',
'caption',
'creation_time',
'width',
'height',
'image_urls',
'frame_delays',
'type',
'age_limit',
'tags',
'tools',
'user_account',
'user_id',
'user_name',
'total_bookmarks',
'total_view',
'rank',
]
for attr in attributes:
value = getattr(illust, attr)
if isinstance(value, list):
value = value.copy()
setattr(self, attr, value)
def update_from_papi(self, json_result):
self.illust_id = json_result.id
self.title = json_result.title
if json_result.caption is not None:
self.caption = json_result.caption
else:
self.caption = ''
self.creation_time = datetime.datetime.strptime(
json_result.created_time,
'%Y-%m-%d %H:%M:%S',
)
self.width = json_result.width
self.height = json_result.height
if json_result.page_count == 1:
if json_result.type == 'ugoira': # ugoira
if json_result.metadata is not None:
self.image_urls = [
json_result.metadata.zip_urls.ugoira600x600,
]
self.frame_delays = [
frame.delay_msec
for frame in json_result.metadata.frames
]
else: # single page illust
self.image_urls = [json_result.image_urls.large]
self.frame_delays = None
else: # multi page illust
if json_result.metadata is not None:
self.image_urls = [
page.image_urls.large
for page in json_result.metadata.pages
]
self.frame_delays = None
self.type = {
'illustration': IllustType.ILLUST,
'manga': IllustType.MANGA,
'ugoira': IllustType.UGOIRA,
}[json_result.type]
self.age_limit = {
'all-age': IllustAgeLimit.ALL_AGE,
'r18': IllustAgeLimit.R18,
'r18-g': IllustAgeLimit.R18G,
}[json_result.age_limit]
self.tags = [tag for tag in json_result.tags]
if json_result.tools is not None:
self.tools = [tool for tool in json_result.tools]
self.user_account = json_result.user.account
self.user_id = json_result.user.id
self.user_name = json_result.user.name
favorited_count = json_result.stats.favorited_count
if favorited_count.public is not None:
self.total_bookmarks = sum(favorited_count.values())
self.total_view = json_result.stats.views_count
def update_from_aapi(self, json_result):
self.illust_id = json_result.id
self.title = json_result.title
self.caption = json_result.caption
self.creation_time = dateutil.parser.parse(json_result.create_date)
self.width = json_result.width
self.height = json_result.height
if json_result.page_count == 1 and json_result.type != 'ugoira':
# single page illust
self.image_urls = [
json_result.meta_single_page.original_image_url
]
self.frame_delays = None
elif json_result.page_count > 1: # multi page illust
self.image_urls = [
page.image_urls.original
for page in json_result.meta_pages
]
self.frame_delays = None
else: # ugoira
pass
self.type = {
'illust': IllustType.ILLUST,
'manga': IllustType.MANGA,
'ugoira': IllustType.UGOIRA,
}[json_result.type]
self.tags = [tag.name for tag in json_result.tags]
self.tools = [tool for tool in json_result.tools]
if 'R-18' in self.tags:
self.age_limit = IllustAgeLimit.R18
elif 'R-18G' in self.tags:
self.age_limit = IllustAgeLimit.R18G
else:
self.age_limit = IllustAgeLimit.ALL_AGE
self.user_account = json_result.user.account
self.user_id = json_result.user.id
self.user_name = json_result.user.name
self.total_bookmarks = json_result.total_bookmarks
self.total_view = json_result.total_view
| {
"content_hash": "33c697243031aa1ca75022db26e341e6",
"timestamp": "",
"source": "github",
"line_count": 250,
"max_line_length": 80,
"avg_line_length": 31.6,
"alnum_prop": 0.5625316455696202,
"repo_name": "Xdynix/PixivPixie",
"id": "2ad9119460e9e8bcccaeb2805a0a3990f49566ba",
"size": "7900",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pixiv_pixie/illust.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "71494"
}
],
"symlink_target": ""
} |
import os.path
import sys
sys.path = [os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'lib')] + sys.path
from amqpsfw import amqp_spec, ioloop
from amqpsfw.client.client import Client
from amqpsfw.client.configuration import ClientConfiguration
class TestClientConsumer:
def test_client_consumer(self):
        class ConsumerApplication(Client):
def processor(self):
channel_number = 1
start = yield from super().processor()
ch_open1 = amqp_spec.Channel.Open(channel_number=1)
ch_open_ok = yield self.write(ch_open1)
ch_open2 = amqp_spec.Channel.Open(channel_number=2)
ch_open_ok = yield self.write(ch_open2)
flow = amqp_spec.Channel.Flow(channel_number=ch_open1.channel_number)
flow_ok = yield self.write(flow)
ex_declare = amqp_spec.Exchange.Declare('message', channel_number=ch_open1.channel_number)
declare_ok = yield self.write(ex_declare)
declare_q = amqp_spec.Queue.Declare(queue_name='text', channel_number=ch_open1.channel_number)
declare_q_ok = yield self.write(declare_q)
bind = amqp_spec.Queue.Bind(queue_name='text', exchange_name='message', routing_key='text.#', channel_number=ch_open1.channel_number)
bind_ok = yield self.write(bind)
flow = amqp_spec.Channel.Flow(channel_number=ch_open2.channel_number)
flow_ok = yield self.write(flow)
ex_declare = amqp_spec.Exchange.Declare('message', channel_number=ch_open2.channel_number)
declare_ok = yield self.write(ex_declare)
declare_q = amqp_spec.Queue.Declare(queue_name='text', channel_number=ch_open2.channel_number)
declare_q_ok = yield self.write(declare_q)
bind = amqp_spec.Queue.Bind(queue_name='text', exchange_name='message', routing_key='text.#', channel_number=ch_open2.channel_number)
bind_ok = yield self.write(bind)
for t in range(100):
content = "qwe" + str(t)
response = yield self.write(amqp_spec.Basic.Publish(exchange_name='message', routing_key='text.tratata', channel_number=channel_number))
assert response is None
response = yield self.write(amqp_spec.Header(class_id=amqp_spec.Basic.Publish.class_id, body_size=len(content), header_properties={'content-type': 'application/json'}, channel_number=channel_number))
assert response is None
response = yield self.write(amqp_spec.Content(content=content, channel_number=channel_number))
assert response is None
consume = amqp_spec.Basic.Consume(queue_name='text', consumer_tag='first_consumer', channel_number=channel_number)
consume_ok = yield self.write(consume)
assert type(consume_ok) is amqp_spec.Basic.ConsumeOk
result = []
                # TODO: Basic.Ack does not require a response, so if the buffer is empty we get None,
                # but if the buffer is full of frames we get something - fix it
                # TODO: if we have intensive input then output stalls because the ioloop gets many read events
for i in range(90):
res = yield
# log.error('TTTTTTTTTTTTTTTTTTTTTTTTTT: ' + str(res))
if res:
result.append(res)
if type(res) == amqp_spec.Content:
# log.error('SSSSSSSSSSSSSSSSSSSSSSSSS: ' + str(result))
assert result[0].delivery_tag == (i+1)/3
yield self.write(amqp_spec.Basic.Ack(delivery_tag=result[0].delivery_tag, channel_number=result[0].channel_number))
assert type(result[0]) == amqp_spec.Basic.Deliver
assert type(result[1]) == amqp_spec.Header
assert type(result[2]) == amqp_spec.Content
result = []
yield self.stop()
        def start_application():
io_loop = ioloop.IOLoop()
            app = ConsumerApplication(io_loop)
app.config = ClientConfiguration()
app.start()
io_loop.start()
        start_application()
| {
"content_hash": "d66e8c3317353b1df0843c1cf9dee844",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 219,
"avg_line_length": 53.433734939759034,
"alnum_prop": 0.5812852311161217,
"repo_name": "akayunov/amqpsfw",
"id": "548e1664c00e21c4bb0a10b3ba0ce4c241e73d00",
"size": "4435",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/client/test_example_consumer_app.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "109641"
}
],
"symlink_target": ""
} |
from Code.Projects.Tracking.Tracking3 import read_redshift
from Code.config import get_pwd
# List the tables available in the public schema of the Redshift cluster.
query = "SELECT DISTINCT tablename FROM pg_table_def WHERE schemaname = 'public' ORDER BY tablename"
def extract():
    """Pull the public-schema table listing from Redshift."""
    return read_redshift(get_pwd(), query)
data = extract()
print(data) | {
"content_hash": "9b3905a86336f8301bd05c3cc489ee01",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 108,
"avg_line_length": 25.916666666666668,
"alnum_prop": 0.7331189710610932,
"repo_name": "ShipJ/Code",
"id": "6514db1f938b228c7684df4a35dd109022938967",
"size": "311",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Projects/Cannes/WiFi/extract.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "732"
},
{
"name": "Makefile",
"bytes": "4258"
},
{
"name": "Python",
"bytes": "67836"
}
],
"symlink_target": ""
} |
import base64
import json
from cattle import ApiError, ClientApiError
from common_fixtures import * # NOQA
from datetime import timedelta
import time
def test_container_create_count(client, context):
cs = client.create_container(imageUuid=context.image_uuid,
count=3)
assert len(cs) == 3
for c in cs:
c = client.wait_success(c)
assert c.state == 'running'
def test_container_simple_start(context):
context.create_container()
def test_container_build(super_client, context, client):
container = context.create_container(build={
'dockerfile': 'test/Dockerfile',
'remote': 'http://example.com',
'rm': True,
})
assert container.build.dockerfile == 'test/Dockerfile'
assert container.build.remote == 'http://example.com'
assert container.build.rm
image = super_client.reload(container).image()
assert image.data.fields.build.dockerfile == 'test/Dockerfile'
assert image.data.fields.build.remote == 'http://example.com'
assert image.data.fields.build.tag == context.image_uuid
assert image.data.fields.build.rm
def test_container_create_only(super_client, client, context):
uuid = "sim:{}".format(random_num())
container = super_client.create_container(accountId=context.project.id,
imageUuid=uuid,
name="test" + random_str(),
startOnCreate=False)
assert_fields(container, {
"type": "container",
"allocationState": "inactive",
"state": "creating",
"imageUuid": uuid,
"firstRunning": None,
})
container = super_client.wait_success(container)
assert_fields(container, {
"type": "container",
"allocationState": "inactive",
"state": "stopped",
"imageUuid": uuid,
})
container = super_client.reload(container)
assert container.imageId is not None
assert container.instanceTriggeredStop == 'stop'
image = super_client.wait_success(container.image())
assert_fields(image, {
"state": "active"
})
volumes = container.volumes()
assert len(volumes) == 1
root_volume = super_client.wait_success(volumes[0])
assert_fields(root_volume, {
"allocationState": "inactive",
"attachedState": "active",
"state": "inactive",
"instanceId": container.id,
"deviceNumber": 0,
})
volume_mappings = root_volume.volumeStoragePoolMaps()
assert len(volume_mappings) == 0
nics = container.nics()
assert len(nics) == 1
image = super_client.wait_success(find_one(super_client.list_image,
name=uuid))
assert_fields(image, {
"state": "active",
"name": uuid,
"isPublic": False,
})
image_mappings = image.imageStoragePoolMaps()
assert len(image_mappings) == 0
return client.reload(container)
def _assert_running(container):
assert_fields(container, {
"allocationState": "active",
"state": "running",
"startCount": NOT_NONE,
"hostId": NOT_NONE,
"firstRunning": NOT_NONE
})
root_volume = container.volumes()[0]
assert_fields(root_volume, {
"state": "active"
})
image = root_volume.image()
assert_fields(image, {
"state": "active"
})
volume_mappings = root_volume.volumeStoragePoolMaps()
assert len(volume_mappings) == 1
assert_fields(volume_mappings[0], {
"state": "active"
})
volume_pool = volume_mappings[0].storagePool()
assert_fields(volume_pool, {
"state": "active"
})
# image_mappings = image.imageStoragePoolMaps()
# assert len(image_mappings) == 2
# for image_mapping in image_mappings:
# assert_fields(image_mapping, {
# # TODO: why isn't this active?
# # "state": "active",
# "storagePoolId": volume_pool.id
# })
instance_host_mappings = container.instanceHostMaps()
assert len(instance_host_mappings) == 1
assert_fields(instance_host_mappings[0], {
"state": "active"
})
def test_container_special_labels(client, context):
uuid = "sim:{}".format(random_num())
labels = {
'io.rancher.container.display_name': 'from-label',
'io.rancher.container.network': 'true',
}
container = client.create_container(accountId=context.project.id,
networkMode='none',
imageUuid=uuid,
name="test" + random_str(),
labels=labels,
startOnCreate=False)
container = client.wait_success(container)
assert container.state == 'stopped'
assert container.name == 'from-label'
assert container.networkMode == 'managed'
def test_container_create_then_start(super_client, client, context):
container = client.create_container(startOnCreate=False,
imageUuid=context.image_uuid)
container = client.wait_success(container)
container = container.start()
assert container.state == "starting"
assert 'start' not in container
assert 'stop' in container
assert 'remove' not in container
_assert_running(super_client.wait_success(container))
def test_container_first_running(client, context):
c = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False)
c = client.wait_success(c)
assert c.state == 'stopped'
assert c.firstRunning is None
c = client.wait_success(c.start())
assert c.state == 'running'
assert c.firstRunning is not None
first = c.firstRunning
c = client.wait_success(c.restart())
assert c.state == 'running'
assert c.firstRunning == first
def test_container_no_net(client, context):
with pytest.raises(ClientApiError) as e:
context.create_container(networkMode='foo')
assert e.value.message == 'Failed to find network for networkMode foo'
def test_container_restart(client, super_client, context):
container = context.create_container()
_assert_running(super_client.reload(container))
ip = container.primaryIpAddress
assert ip is not None
container = context.client.wait_success(container)
container = container.restart()
assert container.state == 'restarting'
container = client.wait_success(container)
_assert_running(super_client.reload(container))
assert ip == container.primaryIpAddress
def test_container_stop(client, super_client, context):
container = context.create_container(name="test" + random_str())
container = client.wait_success(container)
assert_fields(container, {
"state": "running"
})
container = container.stop()
assert_fields(container, {
"state": "stopping"
})
container = client.wait_success(container)
assert_fields(super_client.reload(container), {
"allocationState": "active",
"state": "stopped"
})
container = super_client.reload(container)
root_volume = container.volumes()[0]
assert_fields(root_volume, {
"state": "detached"
})
image = root_volume.image()
assert_fields(image, {
"state": "active"
})
volume_mappings = root_volume.volumeStoragePoolMaps()
assert len(volume_mappings) == 1
assert_fields(volume_mappings[0], {
"state": "inactive"
})
volume_pool = volume_mappings[0].storagePool()
assert_fields(volume_pool, {
"state": "active"
})
image_mappings = image.imageStoragePoolMaps()
assert len(image_mappings) == 1
# for image_mapping in image_mappings:
# assert_fields(image_mapping, {
# # TODO: Why isn't this active
# # "state": "active",
# "storagePoolId": volume_pool.id
# })
instance_host_mappings = container.instanceHostMaps()
assert len(instance_host_mappings) == 1
assert instance_host_mappings[0].state == 'inactive'
def _assert_removed(container):
assert container.state == "removed"
assert_removed_fields(container)
volumes = container.volumes()
assert len(volumes) == 0
return container
def _assert_error(container):
assert container.state == "error"
volumes = container.volumes()
assert len(volumes) == 1
assert volumes[0].state != "removed"
volume_mappings = volumes[0].volumeStoragePoolMaps()
assert len(volume_mappings) == 1
assert volume_mappings[0].state == "inactive"
return container
def test_container_remove(client, super_client, context):
container = context.create_container(name="test" + random_str())
container = client.wait_success(container)
container = client.wait_success(container.stop())
assert container.state == "stopped"
container = client.delete(container)
assert container.state == "removing"
container = client.wait_success(container)
_assert_removed(super_client.reload(container))
return container
def test_container_delete_while_running(client, super_client, context):
container = context.create_container(name="test" + random_str())
container = client.wait_success(container)
assert container.state == 'running'
container = client.delete(container)
assert container.state == 'stopping'
container = client.wait_success(container)
_assert_removed(super_client.reload(container))
return container
def test_container_purge(client, super_client, context):
container = test_container_remove(client, super_client, context)
assert container.state == "removed"
# It's easier to call container.purge(), but this was to test other
# things too
remove_time = now() - timedelta(hours=1)
super_client.update(container, {
'removeTime': format_time(remove_time)
})
purge = super_client.list_task(name="purge.resources")[0]
purge.execute()
container = client.reload(container)
for x in range(30):
if container.state == "removed":
time.sleep(0.5)
container = client.reload(container)
else:
break
assert container.state != "removed"
container = client.wait_success(container)
assert container.state == "purged"
instance_host_mappings = super_client.reload(container).instanceHostMaps()
assert len(instance_host_mappings) == 0
volumes = container.volumes()
assert len(volumes) == 0
def test_start_stop(client, context):
container = context.create_container(name="test" + random_str())
container = client.wait_success(container)
for _ in range(5):
assert container.state == 'running'
container = client.wait_success(container.stop())
assert container.state == 'stopped'
container = client.wait_success(container.start())
assert container.state == 'running'
def test_container_image_required(client):
try:
client.create_container()
assert False
except ApiError as e:
assert e.error.status == 422
assert e.error.code == 'MissingRequired'
assert e.error.fieldName == 'imageUuid'
def test_container_compute_fail(super_client, context):
data = {
'compute.instance.activate::fail': True,
'io.cattle.platform.process.instance.InstanceStart': {
'computeTries': 1
}
}
container = context.super_create_container_no_success(data=data)
assert container.transitioning == 'error'
assert container.transitioningMessage == \
'Failing [compute.instance.activate]'
_assert_error(super_client.reload(container))
def test_container_storage_fail(super_client, context):
data = {
'storage.volume.activate::fail': True,
}
container = context.super_create_container_no_success(data=data)
assert container.transitioning == 'error'
assert container.transitioningMessage == \
'Failing [storage.volume.activate]'
_assert_error(super_client.reload(container))
def test_container_restart_policy(super_client, client):
for c in [super_client, client]:
restart_policy = c.schema.types['restartPolicy']
assert len(restart_policy.resourceFields) == 2
assert 'name' in restart_policy.resourceFields
assert 'maximumRetryCount' in restart_policy.resourceFields
container = c.schema.types['container']
assert 'restartPolicy' == \
container.resourceFields['restartPolicy'].type
def test_container_exec_on_stop(client, context):
c = context.create_container()
assert callable(c.execute)
c = client.wait_success(c.stop())
assert 'execute' not in c
def test_container_exec(context):
c = context.create_container()
assert callable(c.execute)
resp = c.execute(command=['/bin/sh'])
assert resp.url is not None
assert resp.token is not None
jwt = _get_jwt(resp.token)
assert jwt['exec']['AttachStdin']
assert jwt['exec']['AttachStdout']
assert jwt['exec']['Tty']
assert jwt['exec']['Cmd'] == ['/bin/sh']
assert jwt['exec']['Container'] == c.externalId
assert jwt['exp'] is not None
resp = c.execute(command=['/bin/sh2', 'blah'], attachStdin=False,
attachStdout=False, tty=False)
assert resp.url is not None
assert resp.token is not None
jwt = _get_jwt(resp.token)
assert not jwt['exec']['AttachStdin']
assert not jwt['exec']['AttachStdout']
assert not jwt['exec']['Tty']
assert jwt['exec']['Cmd'] == ['/bin/sh2', 'blah']
context.delete(c)
def test_container_logs(context):
c = context.create_container()
assert callable(c.logs)
resp = c.logs(follow=True, lines=300)
assert resp.url is not None
assert resp.token is not None
jwt = _get_jwt(resp.token)
assert jwt['logs']['Container'] == c.externalId
assert jwt['logs']['Lines'] == 300
assert jwt['logs']['Follow'] is True
assert jwt['exp'] is not None
resp = c.logs()
assert resp.url is not None
assert resp.token is not None
jwt = _get_jwt(resp.token)
assert jwt['logs']['Container'] == c.externalId
assert jwt['logs']['Lines'] == 100
assert jwt['logs']['Follow'] is True
assert jwt['exp'] is not None
context.delete(c)
def test_container_labels(client, context):
labels = {'affinity': "container==B", '!affinity': "container==C"}
container = context.create_container(name="test" + random_str(),
labels=labels)
container = client.wait_success(container)
assert container.state == 'running'
assert container.labels == labels
def _get_jwt(token):
text = token.split('.')[1]
    missing_padding = len(text) % 4
    if missing_padding:
        text += '=' * (4 - missing_padding)
return json.loads(base64.b64decode(text))
def test_container_request_ip(super_client, client, context):
for i in range(2):
        # Doing this twice essentially ensures that the IP gets freed the first
# time
container = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False)
container = super_client.wait_success(container)
assert container.state == 'stopped'
container.data.fields['requestedIpAddress'] = '10.42.33.33'
container = super_client.update(container, data=container.data)
container = super_client.wait_success(container.start())
assert container.primaryIpAddress == '10.42.33.33'
# Try second time and should fail because it is used
container2 = client.create_container(imageUuid=context.image_uuid,
startOnCreate=False)
container2 = super_client.wait_success(container2)
assert container2.state == 'stopped'
container2.data.fields['requestedIpAddress'] = '10.42.33.33'
container2 = super_client.update(container2, data=container2.data)
container2 = super_client.wait_success(container2.start())
assert container2.primaryIpAddress != '10.42.33.33'
    # Release 10.42.33.33
container = super_client.wait_success(super_client.delete(container))
container = super_client.wait_success(container.purge())
nics = container.nics()
assert len(nics) == 0
def test_container_network_modes(context, super_client):
c = context.create_container(networkMode=None)
c = super_client.wait_success(c)
assert c.state == 'running'
assert len(c.nics()) == 0
target = context.create_container(networkMode='bridge')
target = super_client.wait_success(target)
assert c.state == 'running'
assert len(target.nics()) == 1
for i in [('host', 'dockerHost'), ('none', 'dockerNone'),
('container', 'dockerContainer'), ('bridge', 'dockerBridge'),
('managed', 'network')]:
args = {
'networkMode': i[0]
}
if i[0] == 'container':
args['networkContainerId'] = target.id
c = context.create_container(**args)
c = super_client.wait_success(c)
assert c.state == 'running'
assert len(c.nics()) == 1
assert c.nics()[0].network().kind == i[1]
def test_container_resource_actions_json_state(context):
c = context.create_container(startOnCreate=True)
c.stop()
c.logs()
c = context.client.wait_success(c)
c.logs()
context.client.delete(c)
c = context.client.wait_success(c)
assert 'logs' not in c
def test_container_network_host_mode_w_dsn(context, super_client):
labels = {'io.rancher.container.dns': "true"}
c = context.create_container(networkMode='host', labels=labels)
c = super_client.wait_success(c)
assert c.state == 'running'
assert len(c.nics()) == 1
assert c.nics()[0].network().kind == 'dockerHost'
def test_container_request_ip_from_label(new_context):
client = new_context.client
labels = {
'io.rancher.container.requested_ip': '10.42.42.42'
}
c = new_context.create_container(labels=labels)
assert c.primaryIpAddress == '10.42.42.42'
c = client.wait_success(client.delete(c))
assert c.state == 'removed'
c = new_context.create_container(labels=labels)
assert c.primaryIpAddress == '10.42.42.42'
c = new_context.create_container(labels=labels)
assert c.primaryIpAddress != '10.42.42.42'
| {
"content_hash": "78c36d766c00c8fae3d38f2777b38673",
"timestamp": "",
"source": "github",
"line_count": 639,
"max_line_length": 78,
"avg_line_length": 29.322378716744915,
"alnum_prop": 0.6307840102471046,
"repo_name": "Cerfoglg/cattle",
"id": "c144326e5b9b2f895695e387a87fab858b5384e3",
"size": "18737",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/integration/cattletest/core/test_container.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5271"
},
{
"name": "FreeMarker",
"bytes": "71"
},
{
"name": "Java",
"bytes": "6398519"
},
{
"name": "Makefile",
"bytes": "308"
},
{
"name": "Python",
"bytes": "1582534"
},
{
"name": "Shell",
"bytes": "41134"
}
],
"symlink_target": ""
} |
"""
Communicates with RabbitMQ.
"""
import sys
from copy import copy
import logging
from traceback import format_exc
from twisted.internet import task
from txamqp.client import Closed
from txamqp.client import TwistedDelegate
from txamqp.protocol import AMQClient
from txamqp.content import Content
import txamqp.spec
from twisted.spread import pb
from twisted.internet import reactor
from twisted.internet.protocol import ClientCreator
from twisted.internet.defer import inlineCallbacks, Deferred, returnValue
from hiispider.components.base import Component, shared
from hiispider.components.logger import Logger
from specs import v0_8
LOGGER = logging.getLogger(__name__)
class Queue(Component):
"""Implements the shared 'get' method which returns a message body."""
conn = None
chan = None
queue_size = 0
statusloop = None
queue = None
def __init__(self, server, config, server_mode, **kwargs):
super(Queue, self).__init__(server, server_mode)
config = copy(config)
config.update(kwargs)
self.amqp = {
"host":config["amqp_host"],
"port":config.get("amqp_port", 5672),
"username":config["amqp_username"],
"password":config["amqp_password"],
"queue":config["amqp_queue"],
"exchange":config["amqp_exchange"],
"prefetch_count":config["amqp_prefetch_count"],
"vhost":config["amqp_vhost"]}
@inlineCallbacks
def initialize(self):
LOGGER.info("Initializing %s" % self.__class__.__name__)
client = ClientCreator(reactor,
AMQClient,
delegate=TwistedDelegate(),
vhost=self.amqp["vhost"],
spec=txamqp.spec.loadString(v0_8),
heartbeat=0)
self.conn = yield client.connectTCP(
self.amqp["host"],
self.amqp["port"],
timeout=sys.maxint)
yield self.conn.authenticate(
self.amqp["username"],
self.amqp["password"])
self.chan = yield self.conn.channel(2)
yield self.chan.channel_open()
yield self.chan.basic_qos(prefetch_count=self.amqp["prefetch_count"])
# Create Queue
yield self.chan.queue_declare(
queue=self.amqp["queue"],
durable=False,
exclusive=False,
auto_delete=False)
# Create Exchange
yield self.chan.exchange_declare(
exchange=self.amqp["exchange"],
type="fanout",
durable=False,
auto_delete=False)
yield self.chan.queue_bind(
queue=self.amqp["queue"],
exchange=self.amqp["exchange"])
yield self.chan.basic_consume(queue=self.amqp["queue"],
no_ack=False,
consumer_tag="hiispider_consumer")
self.queue = yield self.conn.queue("hiispider_consumer")
self.statusloop = task.LoopingCall(self.status_check)
self.statusloop.start(60)
LOGGER.info('%s initialized.' % self.__class__.__name__)
@shared
def publish(self, item):
self.chan.basic_publish(
exchange=self.amqp["exchange"],
content=Content(item))
@inlineCallbacks
def shutdown(self):
if self.statusloop:
self.statusloop.stop()
LOGGER.info('Closing %s' % self.__class__.__name__)
yield self.queue.close()
yield self.chan.channel_close()
chan0 = yield self.conn.channel(0)
yield chan0.connection_close()
LOGGER.info('%s closed.' % self.__class__.__name__)
@shared
@inlineCallbacks
def get(self, *args, **kwargs):
msg = yield self.queue.get(*args, **kwargs)
self.chan.basic_ack(msg.delivery_tag)
returnValue(msg.content.body)
@inlineCallbacks
def status_check(self):
try:
queue_status = yield self.chan.queue_declare(
queue=self.amqp["queue"],
passive=True)
self.queue_size = queue_status.fields[1]
LOGGER.debug("%s queue size: "
"%d" % (self.__class__.__name__, self.queue_size))
except:
LOGGER.error(format_exc())
self.reconnect()
@inlineCallbacks
def reconnect(self):
try:
yield self.shutdown()
        except Exception:
LOGGER.error(format_exc())
try:
yield self.initialize()
        except Exception:
LOGGER.error(format_exc())
| {
"content_hash": "d44e6109ac6affe458bee55643336e78",
"timestamp": "",
"source": "github",
"line_count": 141,
"max_line_length": 77,
"avg_line_length": 32.04255319148936,
"alnum_prop": 0.5965028773793714,
"repo_name": "hiidef/hiispider",
"id": "e17bdba52cf2b8e9323b820cbd69bbed0fdb35ca",
"size": "4565",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "hiispider/components/queue/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "532169"
},
{
"name": "Shell",
"bytes": "787"
}
],
"symlink_target": ""
} |
import os
import sys
import numpy as np
sys.path.append(os.getcwd())
def noise_sampler(bs):
return np.random.normal(0.0, 1.0, [bs, 2])
if __name__ == '__main__':
from a_nice_mc.objectives.expression.ring2d import Ring2d
from a_nice_mc.models.discriminator import MLPDiscriminator
from a_nice_mc.models.generator import create_nice_network
from a_nice_mc.train.wgan_nll import Trainer
os.environ['CUDA_VISIBLE_DEVICES'] = ''
energy_fn = Ring2d(display=False)
discriminator = MLPDiscriminator([400, 400, 400])
generator = create_nice_network(
2, 2,
[
([400], 'v1', False),
([400], 'x1', True),
([400], 'v2', False),
]
)
trainer = Trainer(generator, energy_fn, discriminator, noise_sampler, b=8, m=2)
trainer.train()
| {
"content_hash": "7319835ffb0b3416f4079fef1acec0f9",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 83,
"avg_line_length": 25.9375,
"alnum_prop": 0.6180722891566265,
"repo_name": "ermongroup/a-nice-mc",
"id": "f122ea3c5f53cb98cf1341703e9eb2d007415f9b",
"size": "830",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/nice_ring2d.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "91929"
},
{
"name": "Python",
"bytes": "49991"
}
],
"symlink_target": ""
} |
"""Multiprocessing via Popen.
This module provides a multi-processing pool backed by Popen,
with additional timeout support.
"""
import os
import sys
import struct
import threading
import subprocess
import concurrent.futures
from enum import IntEnum
from collections import namedtuple
import pickle
def kill_child_processes(pid):
"""Kill all child processes recursively for a given pid.
Parameters
----------
pid : int
        The given process id.
"""
# pylint: disable=import-outside-toplevel
import psutil
try:
parent = psutil.Process(pid)
children = parent.children(recursive=True)
except psutil.NoSuchProcess:
return
for process in children:
try:
process.kill()
except psutil.NoSuchProcess:
pass
class StatusKind(IntEnum):
"""Running and return value status."""
RUNNING = 0
COMPLETE = 1
EXCEPTION = 2
TIMEOUT = 3
class MapResult(namedtuple("MapResult", ["status", "value"])):
"""Result of map_with_error_catching.
Parameters
----------
status : StatusKind
The status of the result.
value : Any
The result value.
"""
__slots__ = []
class PopenWorker:
"""A subprocess worker via Popen.
PopenWorker provides a low-level
API to interact with a separate process via Popen.
Parameters
----------
initializer: callable or None
A callable initializer, or None
initargs: Tuple[object]
A tuple of args for the initializer
"""
def __init__(self, initializer=None, initargs=()):
self._proc = None
self._initializer = initializer
self._initargs = initargs
if self._initializer is not None and not callable(self._initializer):
raise TypeError("initializer must be callable for PopenWorker")
def __del__(self):
try:
self.kill()
except ImportError:
pass
def kill(self):
"""Kill the current running process and cleanup.
Note
----
The worker can start a new process when send is called again.
"""
if self._proc is not None:
# allow gracefully shutdown
try:
self._writer.close()
except IOError:
pass
try:
self._reader.close()
except IOError:
pass
            # kill all child processes recursively
try:
kill_child_processes(self._proc.pid)
except TypeError:
pass
try:
self._proc.kill()
except OSError:
pass
self._proc = None
def _start(self):
"""Start a new subprocess if nothing is available"""
if self._proc is not None:
return
# connect subprocess with a pair of pipes
main_read, worker_write = os.pipe()
worker_read, main_write = os.pipe()
cmd = [sys.executable, "-m", "tvm.exec.popen_worker"]
if sys.platform == "win32":
# pylint: disable=import-outside-toplevel
import msvcrt
worker_read_handle = msvcrt.get_osfhandle(worker_read)
worker_write_handle = msvcrt.get_osfhandle(worker_write)
os.set_handle_inheritable(worker_read_handle, True)
os.set_handle_inheritable(worker_write_handle, True)
cmd += [str(worker_read_handle), str(worker_write_handle)]
self._proc = subprocess.Popen(cmd, close_fds=False)
else:
cmd += [str(worker_read), str(worker_write)]
self._proc = subprocess.Popen(cmd, pass_fds=(worker_read, worker_write))
# close worker side of the pipe
os.close(worker_read)
os.close(worker_write)
self._reader = os.fdopen(main_read, "rb")
self._writer = os.fdopen(main_write, "wb")
def join(self, timeout=None):
"""Join the current process worker before it terminates.
Parameters
----------
timeout: Optional[number]
Timeout value, block at most timeout seconds if it
is a positive number.
"""
if self._proc:
try:
self._proc.wait(timeout)
except subprocess.TimeoutExpired:
pass
def is_alive(self):
"""Check if the process is alive"""
if self._proc:
return self._proc.poll() is None
return False
def send(self, fn, args=(), kwargs=None, timeout=None):
"""Send a new function task fn(*args, **kwargs) to the subprocess.
Parameters
----------
fn : function
The function to be invoked.
args : list
Positional argument.
kwargs : dict
Keyword arguments
timeout : float
Timeout value when executing the function
Note
----
The caller must call recv before calling the next send in
order to make sure the timeout and child process exit
won't affect the later requests.
"""
# use cloud pickle
# pylint: disable=import-outside-toplevel
import cloudpickle
if self._proc is None:
self._start()
# init
if self._initializer is not None:
self.send(self._initializer, self._initargs)
self.recv()
kwargs = {} if not kwargs else kwargs
data = cloudpickle.dumps((fn, args, kwargs, timeout), protocol=pickle.HIGHEST_PROTOCOL)
try:
self._writer.write(struct.pack("<i", len(data)))
self._writer.write(data)
self._writer.flush()
except IOError:
pass
def _child_process_error(self):
"""Raise a child process error."""
# kill and lazily restart the process in the next send.
self.kill()
return ChildProcessError("Subprocess terminated")
def recv(self):
"""Receive the result of the last send.
Returns
-------
result: object
The result of the last send.
Raises
------
ChildProcessError: if the child process exited abnormally.
TimeoutError: if timeout happens
Exception: if other exception happens during the execution.
"""
# pylint: disable=import-outside-toplevel
import cloudpickle
try:
len_data = self._reader.read(4)
except IOError:
raise self._child_process_error()
if len(len_data) == 0:
raise self._child_process_error()
try:
recv_bytes = struct.unpack("<i", len_data)[0]
status, value = cloudpickle.loads(self._reader.read(recv_bytes))
except IOError:
raise self._child_process_error()
if status == StatusKind.COMPLETE:
return value
if status == StatusKind.EXCEPTION:
raise value
assert status == StatusKind.TIMEOUT
# kill and lazily restart the process in the next send.
self.kill()
raise TimeoutError()
class PopenPoolExecutor:
"""An parallel executor backed by Popen processes.
Parameters
----------
    max_workers : int
Maximum number of workers
timeout : float
Timeout value for each function submit.
initializer: callable or None
A callable initializer, or None
initargs: Tuple[object]
A tuple of args for the initializer
Note
----
    If max_workers is None then the number returned by
    os.cpu_count() is used. This behavior aligns with that of
    multiprocessing.Pool().
"""
def __init__(self, max_workers=None, timeout=None, initializer=None, initargs=()):
if max_workers is None:
max_workers = os.cpu_count()
# Use an internal thread pool to send to popen workers
self._threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=max_workers)
self._timeout = timeout
self._worker_map = {}
self._lock = threading.Lock()
self._initializer = initializer
self._initargs = initargs
if self._initializer is not None and not callable(self._initializer):
raise TypeError("initializer must be callable for PopenPoolExecutor")
def __del__(self):
self._lock.acquire()
for worker in self._worker_map.values():
try:
worker.kill()
except ImportError:
pass
self._lock.release()
self._threadpool.shutdown()
def _worker_run(self, fn, args, kwargs):
"""Internal thread runner."""
self._lock.acquire()
tid = threading.get_ident()
if tid not in self._worker_map:
proc = PopenWorker(self._initializer, self._initargs)
self._worker_map[tid] = proc
else:
proc = self._worker_map[tid]
self._lock.release()
proc.send(fn, args, kwargs, self._timeout)
return proc.recv()
def _worker_run_with_error_catching(self, fn, args, kwargs) -> MapResult:
# pylint: disable=broad-except
try:
return MapResult(status=StatusKind.COMPLETE, value=self._worker_run(fn, args, kwargs))
except TimeoutError as exception:
return MapResult(status=StatusKind.TIMEOUT, value=exception)
except Exception as exception:
return MapResult(status=StatusKind.EXCEPTION, value=exception)
def submit(self, fn, *args, **kwargs) -> concurrent.futures.Future:
"""Submit a new function job to the pool
Parameters
----------
fn : function
The function to be invoked.
args : list
Positional argument.
kwargs : dict
Keyword arguments
Returns
-------
future : concurrent.futures.Future
A future that can be used to access the result.
"""
# pylint: disable=unnecessary-lambda
worker = lambda *args: self._worker_run(*args)
return self._threadpool.submit(worker, fn, args, kwargs)
def map_with_error_catching(self, fn, iterator):
"""Same as map, but catches exceptions and return them instead.
Parameters
----------
fn : function
The function to be invoked.
iterator : Iterator
Input iterator.
Returns
-------
out_iter : Iterator[MapResult]
The result iterator.
"""
worker = lambda x: self._worker_run_with_error_catching(fn, (x,), None)
return self._threadpool.map(worker, iterator)
| {
"content_hash": "8e0a0d0b188acb9d7fcce52669339168",
"timestamp": "",
"source": "github",
"line_count": 372,
"max_line_length": 98,
"avg_line_length": 29.026881720430108,
"alnum_prop": 0.5759399888868308,
"repo_name": "dmlc/tvm",
"id": "907231c1a9fa07988cae1e08f18be533a5525cba",
"size": "11614",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/tvm/contrib/popen_pool.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6112"
},
{
"name": "C",
"bytes": "92947"
},
{
"name": "C++",
"bytes": "5765945"
},
{
"name": "CMake",
"bytes": "74045"
},
{
"name": "Go",
"bytes": "112384"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "171101"
},
{
"name": "JavaScript",
"bytes": "49803"
},
{
"name": "Makefile",
"bytes": "55807"
},
{
"name": "Objective-C",
"bytes": "15241"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "7183810"
},
{
"name": "Rust",
"bytes": "181961"
},
{
"name": "Scala",
"bytes": "202148"
},
{
"name": "Shell",
"bytes": "97271"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['deprecated'],
'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: bigip_gtm_facts
short_description: Collect facts from F5 BIG-IP GTM devices
description:
- Collect facts from F5 BIG-IP GTM devices.
version_added: 2.3
options:
include:
description:
- Fact category to collect.
required: True
choices:
- pool
- wide_ip
- server
filter:
description:
- Perform regex filter of response. Filtering is done on the name of
the resource. Valid filters are anything that can be provided to
Python's C(re) module.
deprecated:
removed_in: '2.11'
alternative: bigip_device_facts
why: >
The bigip_gtm_facts module is an outlier as all facts are being collected
in the bigip_device_facts module. Additionally, the M(bigip_device_facts)
module is easier to maintain and use.
extends_documentation_fragment: f5
notes:
- This module is deprecated. Use the C(bigip_device_facts) module instead.
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Get pool facts
bigip_gtm_facts:
server: lb.mydomain.com
user: admin
password: secret
include: pool
filter: my_pool
delegate_to: localhost
'''
RETURN = r'''
wide_ip:
description:
Contains the lb method for the wide ip and the pools that are within the wide ip.
returned: changed
type: list
sample:
wide_ip:
- enabled: True
failure_rcode: noerror
failure_rcode_response: disabled
failure_rcode_ttl: 0
full_path: /Common/foo.ok.com
last_resort_pool: ""
minimal_response: enabled
name: foo.ok.com
partition: Common
persist_cidr_ipv4: 32
persist_cidr_ipv6: 128
persistence: disabled
pool_lb_mode: round-robin
pools:
- name: d3qw
order: 0
partition: Common
ratio: 1
ttl_persistence: 3600
type: naptr
pool:
description: Contains the pool object status and enabled status.
returned: changed
type: list
sample:
pool:
- alternate_mode: round-robin
dynamic_ratio: disabled
enabled: True
fallback_mode: return-to-dns
full_path: /Common/d3qw
load_balancing_mode: round-robin
manual_resume: disabled
max_answers_returned: 1
members:
- disabled: True
flags: a
full_path: ok3.com
member_order: 0
name: ok3.com
order: 10
preference: 10
ratio: 1
service: 80
name: d3qw
partition: Common
qos_hit_ratio: 5
qos_hops: 0
qos_kilobytes_second: 3
qos_lcs: 30
qos_packet_rate: 1
qos_rtt: 50
qos_topology: 0
qos_vs_capacity: 0
qos_vs_score: 0
availability_state: offline
enabled_state: disabled
ttl: 30
type: naptr
verify_member_availability: disabled
server:
description:
Contains the virtual server enabled and availability status, and address.
returned: changed
type: list
sample:
server:
- addresses:
- device_name: /Common/qweqwe
name: 10.10.10.10
translation: none
datacenter: /Common/xfxgh
enabled: True
expose_route_domains: no
full_path: /Common/qweqwe
iq_allow_path: yes
iq_allow_service_check: yes
iq_allow_snmp: yes
limit_cpu_usage: 0
limit_cpu_usage_status: disabled
limit_max_bps: 0
limit_max_bps_status: disabled
limit_max_connections: 0
limit_max_connections_status: disabled
limit_max_pps: 0
limit_max_pps_status: disabled
limit_mem_avail: 0
limit_mem_avail_status: disabled
link_discovery: disabled
monitor: /Common/bigip
name: qweqwe
partition: Common
product: single-bigip
virtual_server_discovery: disabled
virtual_servers:
- destination: 10.10.10.10:0
enabled: True
full_path: jsdfhsd
limit_max_bps: 0
limit_max_bps_status: disabled
limit_max_connections: 0
limit_max_connections_status: disabled
limit_max_pps: 0
limit_max_pps_status: disabled
name: jsdfhsd
translation_address: none
translation_port: 0
'''
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from distutils.version import LooseVersion
try:
from f5.bigip import ManagementRoot
from icontrol.exceptions import iControlUnexpectedHTTPError
from f5.utils.responses.handlers import Stats
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
try:
from library.module_utils.network.f5.common import F5BaseClient
except ImportError:
from ansible.module_utils.network.f5.common import F5BaseClient
try:
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import f5_argument_spec
except ImportError:
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import f5_argument_spec
class F5Client(F5BaseClient):
def __init__(self, *args, **kwargs):
super(F5Client, self).__init__(*args, **kwargs)
self.provider = self.merge_provider_params()
@property
def api(self):
if self._client:
return self._client
try:
result = ManagementRoot(
self.provider['server'],
self.provider['user'],
self.provider['password'],
port=self.provider['server_port'],
verify=self.provider['validate_certs'],
token='tmos'
)
self._client = result
return self._client
except Exception as ex:
error = 'Unable to connect to {0} on port {1}. The reported error was "{2}".'.format(
self.provider['server'], self.provider['server_port'], str(ex)
)
raise F5ModuleError(error)
class BaseManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
self.types = dict(
a_s='a',
aaaas='aaaa',
cnames='cname',
mxs='mx',
naptrs='naptr',
srvs='srv'
)
def filter_matches_name(self, name):
if self.want.filter is None:
return True
matches = re.match(self.want.filter, str(name))
if matches:
return True
else:
return False
def version_is_less_than_12(self):
version = self.client.api.tmos_version
if LooseVersion(version) < LooseVersion('12.0.0'):
return True
else:
return False
def get_facts_from_collection(self, collection, collection_type=None):
results = []
for item in collection:
if not self.filter_matches_name(item.name):
continue
facts = self.format_facts(item, collection_type)
results.append(facts)
return results
def read_stats_from_device(self, resource):
stats = Stats(resource.stats.load())
return stats.stat
class UntypedManager(BaseManager):
def exec_module(self):
results = []
facts = self.read_facts()
for item in facts:
attrs = item.to_return()
filtered = [(k, v) for k, v in iteritems(attrs) if self.filter_matches_name(k)]
if filtered:
results.append(dict(filtered))
return results
class TypedManager(BaseManager):
def exec_module(self):
results = []
for collection, type in iteritems(self.types):
facts = self.read_facts(collection)
if not facts:
continue
for x in facts:
x.update({'type': type})
for item in facts:
attrs = item.to_return()
filtered = [(k, v) for k, v in iteritems(attrs) if self.filter_matches_name(k)]
if filtered:
results.append(dict(filtered))
return results
class Parameters(AnsibleF5Parameters):
@property
def include(self):
requested = self._values['include']
valid = ['pool', 'wide_ip', 'server', 'all']
if any(x for x in requested if x not in valid):
raise F5ModuleError(
"The valid 'include' choices are {0}".format(', '.join(valid))
)
if 'all' in requested:
return ['all']
else:
return requested
class BaseParameters(Parameters):
@property
def enabled(self):
if self._values['enabled'] is None:
return None
elif self._values['enabled'] in BOOLEANS_TRUE:
return True
else:
return False
@property
def disabled(self):
if self._values['disabled'] is None:
return None
elif self._values['disabled'] in BOOLEANS_TRUE:
return True
else:
return False
def _remove_internal_keywords(self, resource):
resource.pop('kind', None)
resource.pop('generation', None)
resource.pop('selfLink', None)
resource.pop('isSubcollection', None)
resource.pop('fullPath', None)
def to_return(self):
result = {}
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
return result
class PoolParameters(BaseParameters):
api_map = {
'alternateMode': 'alternate_mode',
'dynamicRatio': 'dynamic_ratio',
'fallbackMode': 'fallback_mode',
'fullPath': 'full_path',
'loadBalancingMode': 'load_balancing_mode',
'manualResume': 'manual_resume',
'maxAnswersReturned': 'max_answers_returned',
'qosHitRatio': 'qos_hit_ratio',
'qosHops': 'qos_hops',
'qosKilobytesSecond': 'qos_kilobytes_second',
'qosLcs': 'qos_lcs',
'qosPacketRate': 'qos_packet_rate',
'qosRtt': 'qos_rtt',
'qosTopology': 'qos_topology',
'qosVsCapacity': 'qos_vs_capacity',
'qosVsScore': 'qos_vs_score',
'verifyMemberAvailability': 'verify_member_availability',
'membersReference': 'members'
}
returnables = [
'alternate_mode', 'dynamic_ratio', 'enabled', 'disabled', 'fallback_mode',
'load_balancing_mode', 'manual_resume', 'max_answers_returned', 'members',
'name', 'partition', 'qos_hit_ratio', 'qos_hops', 'qos_kilobytes_second',
'qos_lcs', 'qos_packet_rate', 'qos_rtt', 'qos_topology', 'qos_vs_capacity',
'qos_vs_score', 'ttl', 'type', 'full_path', 'availability_state',
'enabled_state', 'availability_status'
]
@property
def max_answers_returned(self):
if self._values['max_answers_returned'] is None:
return None
return int(self._values['max_answers_returned'])
@property
def members(self):
result = []
if self._values['members'] is None or 'items' not in self._values['members']:
return result
for item in self._values['members']['items']:
self._remove_internal_keywords(item)
if 'disabled' in item:
if item['disabled'] in BOOLEANS_TRUE:
item['disabled'] = True
else:
item['disabled'] = False
if 'enabled' in item:
if item['enabled'] in BOOLEANS_TRUE:
item['enabled'] = True
else:
item['enabled'] = False
if 'fullPath' in item:
item['full_path'] = item.pop('fullPath')
if 'memberOrder' in item:
item['member_order'] = int(item.pop('memberOrder'))
# Cast some attributes to integer
for x in ['order', 'preference', 'ratio', 'service']:
if x in item:
item[x] = int(item[x])
result.append(item)
return result
@property
def qos_hit_ratio(self):
if self._values['qos_hit_ratio'] is None:
return None
return int(self._values['qos_hit_ratio'])
@property
def qos_hops(self):
if self._values['qos_hops'] is None:
return None
return int(self._values['qos_hops'])
@property
def qos_kilobytes_second(self):
if self._values['qos_kilobytes_second'] is None:
return None
return int(self._values['qos_kilobytes_second'])
@property
def qos_lcs(self):
if self._values['qos_lcs'] is None:
return None
return int(self._values['qos_lcs'])
@property
def qos_packet_rate(self):
if self._values['qos_packet_rate'] is None:
return None
return int(self._values['qos_packet_rate'])
@property
def qos_rtt(self):
if self._values['qos_rtt'] is None:
return None
return int(self._values['qos_rtt'])
@property
def qos_topology(self):
if self._values['qos_topology'] is None:
return None
return int(self._values['qos_topology'])
@property
def qos_vs_capacity(self):
if self._values['qos_vs_capacity'] is None:
return None
return int(self._values['qos_vs_capacity'])
@property
def qos_vs_score(self):
if self._values['qos_vs_score'] is None:
return None
return int(self._values['qos_vs_score'])
@property
def availability_state(self):
if self._values['stats'] is None:
return None
try:
result = self._values['stats']['status_availabilityState']
return result['description']
except AttributeError:
return None
@property
def enabled_state(self):
if self._values['stats'] is None:
return None
try:
result = self._values['stats']['status_enabledState']
return result['description']
except AttributeError:
return None
@property
def availability_status(self):
# This fact is a combination of the availability_state and enabled_state
#
# The purpose of the fact is to give a higher-level view of the availability
# of the pool, that can be used in playbooks. If you need further detail,
# consider using the following facts together.
#
# - availability_state
# - enabled_state
#
if self.enabled_state == 'enabled':
if self.availability_state == 'offline':
return 'red'
elif self.availability_state == 'available':
return 'green'
elif self.availability_state == 'unknown':
return 'blue'
else:
return 'none'
else:
# disabled
return 'black'
class WideIpParameters(BaseParameters):
api_map = {
'fullPath': 'full_path',
'failureRcode': 'failure_return_code',
'failureRcodeResponse': 'failure_return_code_response',
'failureRcodeTtl': 'failure_return_code_ttl',
'lastResortPool': 'last_resort_pool',
'minimalResponse': 'minimal_response',
'persistCidrIpv4': 'persist_cidr_ipv4',
'persistCidrIpv6': 'persist_cidr_ipv6',
'poolLbMode': 'pool_lb_mode',
'ttlPersistence': 'ttl_persistence'
}
returnables = [
'full_path', 'description', 'enabled', 'disabled', 'failure_return_code',
'failure_return_code_response', 'failure_return_code_ttl', 'last_resort_pool',
'minimal_response', 'persist_cidr_ipv4', 'persist_cidr_ipv6', 'pool_lb_mode',
'ttl_persistence', 'pools'
]
@property
def pools(self):
result = []
if self._values['pools'] is None:
return []
for pool in self._values['pools']:
del pool['nameReference']
for x in ['order', 'ratio']:
if x in pool:
pool[x] = int(pool[x])
result.append(pool)
return result
@property
def failure_return_code_ttl(self):
if self._values['failure_return_code_ttl'] is None:
return None
return int(self._values['failure_return_code_ttl'])
@property
def persist_cidr_ipv4(self):
if self._values['persist_cidr_ipv4'] is None:
return None
return int(self._values['persist_cidr_ipv4'])
@property
def persist_cidr_ipv6(self):
if self._values['persist_cidr_ipv6'] is None:
return None
return int(self._values['persist_cidr_ipv6'])
@property
def ttl_persistence(self):
if self._values['ttl_persistence'] is None:
return None
return int(self._values['ttl_persistence'])
class ServerParameters(BaseParameters):
api_map = {
'fullPath': 'full_path',
'exposeRouteDomains': 'expose_route_domains',
'iqAllowPath': 'iq_allow_path',
'iqAllowServiceCheck': 'iq_allow_service_check',
'iqAllowSnmp': 'iq_allow_snmp',
'limitCpuUsage': 'limit_cpu_usage',
'limitCpuUsageStatus': 'limit_cpu_usage_status',
'limitMaxBps': 'limit_max_bps',
'limitMaxBpsStatus': 'limit_max_bps_status',
'limitMaxConnections': 'limit_max_connections',
'limitMaxConnectionsStatus': 'limit_max_connections_status',
'limitMaxPps': 'limit_max_pps',
'limitMaxPpsStatus': 'limit_max_pps_status',
'limitMemAvail': 'limit_mem_available',
'limitMemAvailStatus': 'limit_mem_available_status',
'linkDiscovery': 'link_discovery',
'proberFallback': 'prober_fallback',
'proberPreference': 'prober_preference',
'virtualServerDiscovery': 'virtual_server_discovery',
'devicesReference': 'devices',
'virtualServersReference': 'virtual_servers'
}
returnables = [
'datacenter', 'enabled', 'disabled', 'expose_route_domains', 'iq_allow_path',
'full_path', 'iq_allow_service_check', 'iq_allow_snmp', 'limit_cpu_usage',
'limit_cpu_usage_status', 'limit_max_bps', 'limit_max_bps_status',
'limit_max_connections', 'limit_max_connections_status', 'limit_max_pps',
'limit_max_pps_status', 'limit_mem_available', 'limit_mem_available_status',
'link_discovery', 'monitor', 'product', 'prober_fallback', 'prober_preference',
'virtual_server_discovery', 'addresses', 'devices', 'virtual_servers'
]
@property
def product(self):
if self._values['product'] is None:
return None
if self._values['product'] in ['single-bigip', 'redundant-bigip']:
return 'bigip'
return self._values['product']
@property
def devices(self):
result = []
if self._values['devices'] is None or 'items' not in self._values['devices']:
return result
for item in self._values['devices']['items']:
self._remove_internal_keywords(item)
if 'fullPath' in item:
item['full_path'] = item.pop('fullPath')
result.append(item)
return result
@property
def virtual_servers(self):
result = []
if self._values['virtual_servers'] is None or 'items' not in self._values['virtual_servers']:
return result
for item in self._values['virtual_servers']['items']:
self._remove_internal_keywords(item)
if 'disabled' in item:
if item['disabled'] in BOOLEANS_TRUE:
item['disabled'] = True
else:
item['disabled'] = False
if 'enabled' in item:
if item['enabled'] in BOOLEANS_TRUE:
item['enabled'] = True
else:
item['enabled'] = False
if 'fullPath' in item:
item['full_path'] = item.pop('fullPath')
if 'limitMaxBps' in item:
item['limit_max_bps'] = int(item.pop('limitMaxBps'))
if 'limitMaxBpsStatus' in item:
item['limit_max_bps_status'] = item.pop('limitMaxBpsStatus')
if 'limitMaxConnections' in item:
item['limit_max_connections'] = int(item.pop('limitMaxConnections'))
if 'limitMaxConnectionsStatus' in item:
item['limit_max_connections_status'] = item.pop('limitMaxConnectionsStatus')
if 'limitMaxPps' in item:
item['limit_max_pps'] = int(item.pop('limitMaxPps'))
if 'limitMaxPpsStatus' in item:
item['limit_max_pps_status'] = item.pop('limitMaxPpsStatus')
if 'translationAddress' in item:
item['translation_address'] = item.pop('translationAddress')
if 'translationPort' in item:
item['translation_port'] = int(item.pop('translationPort'))
result.append(item)
return result
@property
def limit_cpu_usage(self):
if self._values['limit_cpu_usage'] is None:
return None
return int(self._values['limit_cpu_usage'])
@property
def limit_max_bps(self):
if self._values['limit_max_bps'] is None:
return None
return int(self._values['limit_max_bps'])
@property
def limit_max_connections(self):
if self._values['limit_max_connections'] is None:
return None
return int(self._values['limit_max_connections'])
@property
def limit_max_pps(self):
if self._values['limit_max_pps'] is None:
return None
return int(self._values['limit_max_pps'])
@property
def limit_mem_available(self):
if self._values['limit_mem_available'] is None:
return None
return int(self._values['limit_mem_available'])
class PoolFactManager(BaseManager):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
super(PoolFactManager, self).__init__(**kwargs)
self.kwargs = kwargs
def exec_module(self):
if self.version_is_less_than_12():
manager = self.get_manager('untyped')
else:
manager = self.get_manager('typed')
facts = manager.exec_module()
result = dict(pool=facts)
return result
def get_manager(self, type):
if type == 'typed':
return TypedPoolFactManager(**self.kwargs)
elif type == 'untyped':
return UntypedPoolFactManager(**self.kwargs)
class TypedPoolFactManager(TypedManager):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
super(TypedPoolFactManager, self).__init__(**kwargs)
self.want = PoolParameters(params=self.module.params)
def read_facts(self, collection):
results = []
collection = self.read_collection_from_device(collection)
for resource in collection:
attrs = resource.attrs
attrs['stats'] = self.read_stats_from_device(resource)
params = PoolParameters(params=attrs)
results.append(params)
return results
def read_collection_from_device(self, collection_name):
pools = self.client.api.tm.gtm.pools
collection = getattr(pools, collection_name)
result = collection.get_collection(
requests_params=dict(
params='expandSubcollections=true'
)
)
return result
class UntypedPoolFactManager(UntypedManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(UntypedPoolFactManager, self).__init__(**kwargs)
self.want = PoolParameters(params=self.module.params)
def read_facts(self):
results = []
collection = self.read_collection_from_device()
for resource in collection:
attrs = resource.attrs
attrs['stats'] = self.read_stats_from_device(resource)
params = PoolParameters(params=attrs)
results.append(params)
return results
def read_collection_from_device(self):
result = self.client.api.tm.gtm.pools.get_collection(
requests_params=dict(
params='expandSubcollections=true'
)
)
return result
class WideIpFactManager(BaseManager):
def exec_module(self):
if self.version_is_less_than_12():
manager = self.get_manager('untyped')
else:
manager = self.get_manager('typed')
facts = manager.exec_module()
result = dict(wide_ip=facts)
return result
def get_manager(self, type):
if type == 'typed':
return TypedWideIpFactManager(**self.kwargs)
elif type == 'untyped':
return UntypedWideIpFactManager(**self.kwargs)
class TypedWideIpFactManager(TypedManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(TypedWideIpFactManager, self).__init__(**kwargs)
self.want = WideIpParameters(params=self.module.params)
def read_facts(self, collection):
results = []
collection = self.read_collection_from_device(collection)
for resource in collection:
attrs = resource.attrs
params = WideIpParameters(params=attrs)
results.append(params)
return results
def read_collection_from_device(self, collection_name):
wideips = self.client.api.tm.gtm.wideips
collection = getattr(wideips, collection_name)
result = collection.get_collection(
requests_params=dict(
params='expandSubcollections=true'
)
)
return result
class UntypedWideIpFactManager(UntypedManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(UntypedWideIpFactManager, self).__init__(**kwargs)
self.want = WideIpParameters(params=self.module.params)
def read_facts(self):
results = []
collection = self.read_collection_from_device()
for resource in collection:
attrs = resource.attrs
params = WideIpParameters(params=attrs)
results.append(params)
return results
def read_collection_from_device(self):
result = self.client.api.tm.gtm.wideips.get_collection(
requests_params=dict(
params='expandSubcollections=true'
)
)
return result
class ServerFactManager(UntypedManager):
def __init__(self, *args, **kwargs):
self.client = kwargs.get('client', None)
self.module = kwargs.get('module', None)
super(ServerFactManager, self).__init__(**kwargs)
self.want = ServerParameters(params=self.module.params)
def exec_module(self):
facts = super(ServerFactManager, self).exec_module()
result = dict(server=facts)
return result
def read_facts(self):
results = []
collection = self.read_collection_from_device()
for resource in collection:
attrs = resource.attrs
params = ServerParameters(params=attrs)
results.append(params)
return results
def read_collection_from_device(self):
result = self.client.api.tm.gtm.servers.get_collection(
requests_params=dict(
params='expandSubcollections=true'
)
)
return result
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.kwargs = kwargs
self.want = Parameters(params=self.module.params)
def exec_module(self):
if not self.gtm_provisioned():
raise F5ModuleError(
"GTM must be provisioned to use this module."
)
if 'all' in self.want.include:
names = ['pool', 'wide_ip', 'server']
else:
names = self.want.include
managers = [self.get_manager(name) for name in names]
result = self.execute_managers(managers)
if result:
result['changed'] = True
else:
result['changed'] = False
self._announce_deprecations()
return result
def _announce_deprecations(self):
warnings = []
if self.want:
warnings += self.want._values.get('__warnings', [])
for warning in warnings:
self.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def execute_managers(self, managers):
results = dict()
for manager in managers:
result = manager.exec_module()
results.update(result)
return results
def get_manager(self, which):
if 'pool' == which:
return PoolFactManager(**self.kwargs)
if 'wide_ip' == which:
return WideIpFactManager(**self.kwargs)
if 'server' == which:
return ServerFactManager(**self.kwargs)
def gtm_provisioned(self):
resource = self.client.api.tm.sys.dbs.db.load(
name='provisioned.cpu.gtm'
)
if int(resource.value) == 0:
return False
return True
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = False
argument_spec = dict(
include=dict(
type='list',
choices=[
'pool',
'wide_ip',
'server',
],
required=True
),
filter=dict()
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
client = F5Client(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
module.exit_json(**results)
except F5ModuleError as ex:
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| {
"content_hash": "bef36611aeb5d8ba47d2d308b5396a57",
"timestamp": "",
"source": "github",
"line_count": 979,
"max_line_length": 101,
"avg_line_length": 32.40449438202247,
"alnum_prop": 0.5799079561215483,
"repo_name": "SergeyCherepanov/ansible",
"id": "745360cc10c533638731e727dccbdcc13d4ebf6b",
"size": "31901",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "ansible/ansible/modules/network/f5/_bigip_gtm_facts.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Shell",
"bytes": "824"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class TickfontValidator(_plotly_utils.basevalidators.CompoundValidator):
def __init__(
self, plotly_name="tickfont", parent_name="heatmap.colorbar", **kwargs
):
super(TickfontValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
data_class_str=kwargs.pop("data_class_str", "Tickfont"),
data_docs=kwargs.pop(
"data_docs",
"""
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
            available on the system on which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
""",
),
**kwargs,
)
| {
"content_hash": "f66da1c3020687ce942fcd07a5a61a83",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 39.64102564102564,
"alnum_prop": 0.5407503234152652,
"repo_name": "plotly/plotly.py",
"id": "0d554d1067f4c40ad57457ddb1c1fccb548a1701",
"size": "1546",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/heatmap/colorbar/_tickfont.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "545"
},
{
"name": "JavaScript",
"bytes": "2074"
},
{
"name": "PostScript",
"bytes": "565328"
},
{
"name": "Python",
"bytes": "31506317"
},
{
"name": "TypeScript",
"bytes": "71337"
}
],
"symlink_target": ""
} |
import sys
sys.path.append('../common/tests')
import uuid
import json
from flexmock import flexmock, Mock
from testtools.matchers import Equals, Contains, Not
import stevedore.extension
from test_utils import *
from test_common import TestCase, setup_extra_flexmock
import ConfigParser
from vnc_cfg_api_server import vnc_cfg_api_server
import bottle
import vnc_openstack
@bottle.hook('after_request')
def after_request():
bottle.response.headers['Content-Type'] = 'application/json; charset="UTF-8"'
try:
del bottle.response.headers['Content-Length']
except KeyError:
pass
class VncOpenstackTestCase(TestCase):
def __init__(self, *args, **kwargs):
super(VncOpenstackTestCase, self).__init__(*args, **kwargs)
self._config_knobs = [
('DEFAULTS', '', ''),
('KEYSTONE', 'admin_user', ''),
('KEYSTONE', 'admin_password', ''),
('KEYSTONE', 'admin_tenant_name', ''),
('KEYSTONE', 'admin_token', ''),
('KEYSTONE', 'auth_host', ''),
('KEYSTONE', 'auth_port', ''),
('KEYSTONE', 'auth_protocol', 'http'),
]
# end __init__
def setUp(self):
setup_extra_flexmock([(stevedore.extension.ExtensionManager, '__new__', FakeExtensionManager)])
super(VncOpenstackTestCase, self).setUp()
# end setUp
def tearDown(self):
with open('vnc_openstack.err') as f:
self.assertThat(len(f.read()), Equals(0),
"Error log in vnc_openstack.err")
super(VncOpenstackTestCase, self).tearDown()
# end class VncOpenstackTestCase
class NeutronBackendTestCase(VncOpenstackTestCase):
def setUp(self):
FakeExtensionManager._entry_pt_to_classes['vnc_cfg_api.neutronApi'] = [vnc_openstack.NeutronApiDriver]
super(NeutronBackendTestCase, self).setUp()
# end setUp
def tearDown(self):
del FakeExtensionManager._entry_pt_to_classes['vnc_cfg_api.neutronApi']
super(NeutronBackendTestCase, self).tearDown()
# end tearDown
# end class NeutronBackendTestCase
class KeystoneSyncTestCase(VncOpenstackTestCase):
def setup_flexmock(self):
import keystoneclient.v2_0.client as keystone
FakeExtensionManager._entry_pt_to_classes['vnc_cfg_api.resync'] = [vnc_openstack.OpenstackDriver]
FakeExtensionManager._entry_pt_to_classes['vnc_cfg_api.resourceApi'] = [vnc_openstack.ResourceApiDriver]
setup_extra_flexmock([(keystone.Client, '__new__', get_keystone_client)])
# end setup_flexmock
def setUp(self):
self.setup_flexmock()
super(KeystoneSyncTestCase, self).setUp()
# end setUp
def tearDown(self):
del FakeExtensionManager._entry_pt_to_classes['vnc_cfg_api.resync']
del FakeExtensionManager._entry_pt_to_classes['vnc_cfg_api.resourceApi']
super(KeystoneSyncTestCase, self).tearDown()
# end tearDown
# end class KeystoneSyncTestCase
| {
"content_hash": "df1367167856cb2d36930f3744a5196e",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 112,
"avg_line_length": 35.404761904761905,
"alnum_prop": 0.6580363147276396,
"repo_name": "Juniper/contrail-dev-controller",
"id": "730d8db47b11a3f3684ebd3300f2230edf33a830",
"size": "2974",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "src/config/vnc_openstack/vnc_openstack/tests/test_case.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "242661"
},
{
"name": "C++",
"bytes": "12643864"
},
{
"name": "CSS",
"bytes": "531"
},
{
"name": "Java",
"bytes": "143864"
},
{
"name": "Lua",
"bytes": "6835"
},
{
"name": "Objective-C",
"bytes": "28773"
},
{
"name": "Python",
"bytes": "2243464"
},
{
"name": "Shell",
"bytes": "37954"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.db.models import Avg, Max, Min, Count
# Python standard
import math
from datetime import date as _date
# Spenglr
from network.models import Network
# Calculate mSQ for the oldest records
def batch_network_update_sq():
    # Random 100 networks that have at least one member
    # NOTE: Fix to something more sensible
objects = Network.objects.filter(usernetwork__isnull=False).order_by('?')[:100]
for o in objects:
o.update_sq() # Call SQ recalculation for this course
def searchqueryset_usernetwork_boost( request, sqs ):
# Apply profile/local-boost
profile = request.user.get_profile()
boost = list()
    # Cannot boost on a phrase with spaces; use a (hopefully unique) 'network<id>' string to avoid text clashes
for network in request.user.networks.all():
sqs = sqs.boost( ('network%d' % network.id).lower(), 2 ) # Need to boost with lowercase
return sqs
| {
"content_hash": "27b2917f3c06e26880b1743dd4fcaf8f",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 102,
"avg_line_length": 30.875,
"alnum_prop": 0.6811740890688259,
"repo_name": "mfitzp/smrtr",
"id": "7ab1e624b80b2f70b501a6bce09116bc80a1754a",
"size": "988",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/network/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "173375"
},
{
"name": "Shell",
"bytes": "14"
}
],
"symlink_target": ""
} |
from warehouse import tasks
from warehouse.accounts.interfaces import ITokenService, TokenExpired
from warehouse.packaging.models import RoleInvitation, RoleInvitationStatus
@tasks.task(ignore_result=True, acks_late=True)
def update_role_invitation_status(request):
invites = (
request.db.query(RoleInvitation)
.filter(RoleInvitation.invite_status == RoleInvitationStatus.Pending)
.all()
)
token_service = request.find_service(ITokenService, name="email")
for invite in invites:
try:
token_service.loads(invite.token)
except TokenExpired:
invite.invite_status = RoleInvitationStatus.Expired
| {
"content_hash": "b78b0ed4b571636c973d84748fddef65",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 77,
"avg_line_length": 35.473684210526315,
"alnum_prop": 0.7240356083086054,
"repo_name": "pypa/warehouse",
"id": "6daa5fde1b2918dbfdba627f6fb65c90f364b263",
"size": "1215",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "warehouse/manage/tasks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "676"
},
{
"name": "Dockerfile",
"bytes": "6745"
},
{
"name": "HCL",
"bytes": "42"
},
{
"name": "HTML",
"bytes": "663799"
},
{
"name": "JavaScript",
"bytes": "128585"
},
{
"name": "Makefile",
"bytes": "5068"
},
{
"name": "Mako",
"bytes": "2040"
},
{
"name": "Procfile",
"bytes": "527"
},
{
"name": "Python",
"bytes": "3315335"
},
{
"name": "SCSS",
"bytes": "205844"
},
{
"name": "Shell",
"bytes": "9424"
},
{
"name": "YARA",
"bytes": "9079"
}
],
"symlink_target": ""
} |
from importlib import import_module
import os
import sys
from setuptools import setup, find_packages
from setuptools.command.test import test
def package_env(file_name, strict=False):
file_path = os.path.join(os.path.dirname(__file__), file_name)
if os.path.exists(file_path) or strict:
return open(file_path).read()
else:
return ''
PROJECT = 'swarm-crawler'
VERSION = package_env('VERSION')
URL = package_env('URL')
AUTHOR_AND_EMAIL = [v.strip('>').strip() for v \
in package_env('AUTHOR').split('<mailto:')]
if len(AUTHOR_AND_EMAIL) == 2:
AUTHOR, AUTHOR_EMAIL = AUTHOR_AND_EMAIL
else:
    AUTHOR = AUTHOR_AND_EMAIL[0]
AUTHOR_EMAIL = ''
DESC = "swarm application to extract meaningful texts from any site"
class TestRunner(test):
def run(self, *args, **kwargs):
if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
if self.distribution.tests_require:
self.distribution.fetch_build_eggs(self.distribution.tests_require)
from test import run
run()
if __name__ == '__main__':
setup(
cmdclass={"test": TestRunner},
name=PROJECT,
version=VERSION,
description=DESC,
long_description=package_env('README.rst'),
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=package_env('LICENSE'),
packages=['swarm_crawler', ] + ['.'.join(('swarm_crawler', e)) \
for e in find_packages('swarm_crawler')],
package_dir={
'swarm_crawler': 'swarm_crawler'},
include_package_data=True,
zip_safe=False,
test_suite='test',
install_requires=[
'distribute',
'breadability',
'swarm',
'swarm-http',
'lxml',
'cliff',
'datrie',
'flask-introspect',
'flask-ample'
],
tests_require=[],
entry_points={
'console_scripts': [
'crawler = swarm_crawler.main:main'
],
'swarm_crawler.commands': [
'serve = swarm_crawler.commands.server:Web',
'start_text = swarm_crawler.commands.start:StartText',
'start = swarm_crawler.commands.start:StartDataset',
'start_datasource = swarm_crawler.commands.start:StartDatasource',
'restore_list = swarm_crawler.commands.crawl:RestoreList',
'restore = swarm_crawler.commands.crawl:Restore',
'dataset_list = swarm_crawler.commands.dataset:DatasetList',
'dataset_info = swarm_crawler.commands.dataset:DatasetInfo',
'dataset_delete = swarm_crawler.commands.dataset:DeleteDataset',
'dataset_backup = swarm_crawler.commands.dataset:DatasetBackup',
'datasource_create = swarm_crawler.commands.datasource:CreateDatasource',
'datasource_delete = swarm_crawler.commands.datasource:DeleteDatasource',
],
},
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Programming Language :: Python',
'Programming Language :: Python :: 2.7',
],
) | {
"content_hash": "92a8448577848ce1930303ce31004534",
"timestamp": "",
"source": "github",
"line_count": 102,
"max_line_length": 89,
"avg_line_length": 34.59803921568628,
"alnum_prop": 0.5471805043921791,
"repo_name": "denz/swarm-crawler",
"id": "38b8daac4ab8c5de34c91ad5067acd34182058c1",
"size": "3552",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "3656"
},
{
"name": "Python",
"bytes": "57865"
}
],
"symlink_target": ""
} |
'''
Created on 22.10.2014
@author: Philip
'''
import flask
import flask_login
import users.forms
import users.models
import utils.forms
import utils.redirect
from utils.views import get_column_names, create_table, alert_warning, alert_info, alert_success, \
error_not_found, error_access_denied, create_action_urls
bp_users = flask.Blueprint("users", __name__)
@bp_users.route('/login', methods=['GET', 'POST'])
def login():
form = users.forms.LoginForm()
if form.validate_on_submit():
user = users.models.User.query.filter_by(username=form.username.data).first()
if user is not None and user.verify_password(form.password.data):
flask_login.login_user(user, True)
alert_info('You have been logged in')
return utils.redirect.redirect_back('index')
alert_warning('Invalid username or password')
return flask.render_template('login.html', form=form)
@bp_users.route('/logout')
def logout():
flask_login.logout_user()
alert_info('You have been logged out')
return flask.redirect(flask.url_for('.login'))
@bp_users.route('/register', methods=['GET', 'POST'])
def register():
form = users.forms.RegisterForm()
if form.validate_on_submit():
users.models.User.create(form=form)
alert_success('Your account has been created')
return flask.redirect(flask.url_for('.login'))
return flask.render_template('register.html', form=form)
@bp_users.route('/table')
@flask_login.login_required
def table():
if not flask_login.current_user.is_admin():
return error_access_denied('Users', 'index')
models = users.models.User.query.all()
columns = get_column_names(users.models.User())
table = create_table(models, {'Edit': '.edit', 'Delete': '.delete'}, user_id='id')
actions = create_action_urls({'Add': '.add'})
return flask.render_template('table_page.html', title="Users", columns=columns, table=table, actions=actions)
@bp_users.route('/add', methods=['GET', 'POST'])
@flask_login.login_required
def add():
if not flask_login.current_user.is_admin():
return error_access_denied('Users', 'index')
form = users.forms.UserForm()
form.role.choices = [(role.index, str(role).capitalize()) for role in users.constants.Roles]
if form.validate_on_submit():
users.models.User.create(form=form)
alert_success('The user has been saved')
return flask.redirect(flask.url_for('.table'))
return flask.render_template('add_page.html', type="User", form=form)
@bp_users.route('/edit/<int:user_id>', methods=['GET', 'POST'])
@flask_login.login_required
def edit(user_id):
user = users.models.User.query.filter_by(id=user_id).first()
if user is None:
return error_not_found('User ID {}'.format(user_id), 'index')
if not user.has_access(flask_login.current_user):
return error_access_denied('User ID {}'.format(user_id), 'index')
form = users.forms.UserForm(flask.request.form, user)
# Set default of username field to check if changed (see custom validator
# function validate_username)
form.username.default = user.username
if flask_login.current_user.is_admin():
form.role.choices = [(role.index, str(role).capitalize()) for role in users.constants.Roles]
else:
form.role.choices = [(flask_login.current_user.role, flask_login.current_user.role_string)]
if form.validate_on_submit():
user.update(form=form)
alert_success('The user has been saved')
actions = create_action_urls({'Delete': '.delete'}, user, user_id='id')
return flask.render_template('edit_page.html', type="User", name=user.username, form=form, actions=actions)
@bp_users.route('/delete/<int:user_id>', methods=['GET', 'POST'])
@flask_login.login_required
def delete(user_id):
user = users.models.User.query.filter_by(id=user_id).first()
if user is None:
return error_not_found('User ID {}'.format(user_id), 'index')
if not user.has_access(flask_login.current_user):
return error_access_denied('User ID {}'.format(user_id), 'index')
form = utils.forms.ConfirmationForm()
if form.validate_on_submit():
if form.radio.data == 'yes':
user.delete()
alert_success('The user has been deleted')
return flask.redirect(flask.url_for('index'))
return flask.render_template('delete_page.html', type="User", name=user.username, form=form)
| {
"content_hash": "bc8a25aa7d06f0e81628b4f58a91bba3",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 113,
"avg_line_length": 37.747899159663866,
"alnum_prop": 0.6667408726625111,
"repo_name": "philipschoemig/TACTourney",
"id": "18b82e5e02390f29d385fff5667833b488367013",
"size": "4492",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/users/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "8273"
},
{
"name": "JavaScript",
"bytes": "23910"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "49303"
}
],
"symlink_target": ""
} |
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import ConfigParser
import cStringIO
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import sys
import traceback
from oslo.config import cfg
from tuskar.openstack.common.gettextutils import _
from tuskar.openstack.common import importutils
from tuskar.openstack.common import jsonutils
from tuskar.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
help='Print debugging output (set logging level to '
'DEBUG instead of default WARNING level).'),
cfg.BoolOpt('verbose',
short='v',
default=False,
help='Print more verbose output (set logging level to '
'INFO instead of default WARNING level).'),
]
logging_cli_opts = [
cfg.StrOpt('log-config',
metavar='PATH',
help='If this option is specified, the logging configuration '
'file specified is used and overrides any other logging '
'options specified. Please see the Python logging module '
'documentation for details on logging configuration '
'files.'),
cfg.StrOpt('log-format',
default=None,
metavar='FORMAT',
help='A logging.Formatter log message format string which may '
'use any of the available logging.LogRecord attributes. '
'This option is deprecated. Please use '
'logging_context_format_string and '
'logging_default_format_string instead.'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Format string for %%(asctime)s in log records. '
'Default: %(default)s'),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to output to. '
'If no default is set, logging will go to stdout.'),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative '
'--log-file paths'),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging.'),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='syslog facility to receive log lines')
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=True,
help='Log output to standard error')
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(request_id)s %(user)s %(tenant)s] '
'%(instance)s%(message)s',
help='format string to use for log messages with context'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='format string to use for log messages without context'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='data to append to log format when level is DEBUG'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
'%(instance)s',
help='prefix each line of exception output with this format'),
cfg.ListOpt('default_log_levels',
default=[
'amqplib=WARN',
'sqlalchemy=WARN',
'boto=WARN',
'suds=INFO',
'keystone=INFO',
'eventlet.wsgi.server=WARN'
],
help='list of logger=LEVEL pairs'),
cfg.BoolOpt('publish_errors',
default=False,
help='publish error events'),
cfg.BoolOpt('fatal_deprecations',
default=False,
help='make deprecations fatal'),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='If an instance is passed with the log message, format '
'it like this'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='If an instance UUID is passed with the log message, '
'format it like this'),
]
CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)
# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
NullHandler = logging.NullHandler
except AttributeError: # NOTE(jkoelker) NullHandler added in Python 2.7
class NullHandler(logging.Handler):
def handle(self, record):
pass
def emit(self, record):
pass
def createLock(self):
self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
logfile = CONF.log_file
logdir = CONF.log_dir
if logfile and not logdir:
return logfile
if logfile and logdir:
return os.path.join(logdir, logfile)
if logdir:
binary = binary or _get_binary_name()
return '%s.log' % (os.path.join(logdir, binary),)
class BaseLoggerAdapter(logging.LoggerAdapter):
def audit(self, msg, *args, **kwargs):
self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
def __init__(self, name='unknown', version='unknown'):
self._logger = None
self.extra = {}
self.name = name
self.version = version
@property
def logger(self):
if not self._logger:
self._logger = getLogger(self.name, self.version)
return self._logger
class ContextAdapter(BaseLoggerAdapter):
warn = logging.LoggerAdapter.warning
def __init__(self, logger, project_name, version_string):
self.logger = logger
self.project = project_name
self.version = version_string
@property
def handlers(self):
return self.logger.handlers
def deprecated(self, msg, *args, **kwargs):
stdmsg = _("Deprecated: %s") % msg
if CONF.fatal_deprecations:
self.critical(stdmsg, *args, **kwargs)
raise DeprecatedConfig(msg=stdmsg)
else:
self.warn(stdmsg, *args, **kwargs)
def process(self, msg, kwargs):
if 'extra' not in kwargs:
kwargs['extra'] = {}
extra = kwargs['extra']
context = kwargs.pop('context', None)
if not context:
context = getattr(local.store, 'context', None)
if context:
extra.update(_dictify_context(context))
instance = kwargs.pop('instance', None)
instance_extra = ''
if instance:
instance_extra = CONF.instance_format % instance
else:
instance_uuid = kwargs.pop('instance_uuid', None)
if instance_uuid:
instance_extra = (CONF.instance_uuid_format
% {'uuid': instance_uuid})
extra.update({'instance': instance_extra})
extra.update({"project": self.project})
extra.update({"version": self.version})
extra['extra'] = extra.copy()
return msg, kwargs
class JSONFormatter(logging.Formatter):
def __init__(self, fmt=None, datefmt=None):
# NOTE(jkoelker) we ignore the fmt argument, but its still there
# since logging.config.fileConfig passes it.
self.datefmt = datefmt
def formatException(self, ei, strip_newlines=True):
lines = traceback.format_exception(*ei)
if strip_newlines:
lines = [itertools.ifilter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = list(itertools.chain(*lines))
return lines
def format(self, record):
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': record.args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None}
if hasattr(record, 'extra'):
message['extra'] = record.extra
if record.exc_info:
message['traceback'] = self.formatException(record.exc_info)
return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
def logging_excepthook(type, value, tb):
extra = {}
if CONF.verbose:
extra['exc_info'] = (type, value, tb)
getLogger(product_name).critical(str(value), **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
self.log_config = log_config
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
def _load_log_config(log_config):
try:
logging.config.fileConfig(log_config)
except ConfigParser.Error as exc:
raise LogConfigError(log_config, str(exc))
def setup(product_name):
"""Setup logging."""
if CONF.log_config:
_load_log_config(CONF.log_config)
else:
_setup_logging_from_conf()
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
cfg.set_defaults(log_opts,
logging_context_format_string=
logging_context_format_string)
def _find_facility_from_conf():
facility_names = logging.handlers.SysLogHandler.facility_names
facility = getattr(logging.handlers.SysLogHandler,
CONF.syslog_log_facility,
None)
if facility is None and CONF.syslog_log_facility in facility_names:
facility = facility_names.get(CONF.syslog_log_facility)
if facility is None:
valid_facilities = facility_names.keys()
consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
valid_facilities.extend(consts)
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
return facility
def _setup_logging_from_conf():
log_root = getLogger(None).logger
for handler in log_root.handlers:
log_root.removeHandler(handler)
if CONF.use_syslog:
facility = _find_facility_from_conf()
syslog = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
log_root.addHandler(syslog)
logpath = _get_log_file_path()
if logpath:
filelog = logging.handlers.WatchedFileHandler(logpath)
log_root.addHandler(filelog)
if CONF.use_stderr:
streamlog = ColorHandler()
log_root.addHandler(streamlog)
elif not CONF.log_file:
# pass sys.stdout as a positional argument
# python2.6 calls the argument strm, in 2.7 it's stream
streamlog = logging.StreamHandler(sys.stdout)
log_root.addHandler(streamlog)
if CONF.publish_errors:
handler = importutils.import_object(
"tuskar.openstack.common.log_handler.PublishErrorsHandler",
logging.ERROR)
log_root.addHandler(handler)
datefmt = CONF.log_date_format
for handler in log_root.handlers:
# NOTE(alaski): CONF.log_format overrides everything currently. This
# should be deprecated in favor of context aware formatting.
if CONF.log_format:
handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
datefmt=datefmt))
log_root.info('Deprecated: log_format is now deprecated and will '
'be removed in the next release')
else:
handler.setFormatter(ContextFormatter(datefmt=datefmt))
if CONF.debug:
log_root.setLevel(logging.DEBUG)
elif CONF.verbose:
log_root.setLevel(logging.INFO)
else:
log_root.setLevel(logging.WARNING)
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
level = logging.getLevelName(level_name)
logger = logging.getLogger(mod)
logger.setLevel(level)
_loggers = {}
def getLogger(name='unknown', version='unknown'):
if name not in _loggers:
_loggers[name] = ContextAdapter(logging.getLogger(name),
name,
version)
return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
"""
create a pass-through logger that does not create the real logger
until it is really needed and delegates all calls to the real logger
once it is created
"""
return LazyAdapter(name, version)
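# Illustrative sketch (hypothetical module-level use, not part of the original
# file): a lazy logger can be created at import time without forcing handler
# setup; the real logger is only built on the first delegated call.
#
#     LOG = getLazyLogger(__name__)   # no real logger created yet
#     LOG.debug('first call creates and delegates to the real logger')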
class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.INFO):
self.logger = logger
self.level = level
def write(self, msg):
self.logger.log(self.level, msg)
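# Illustrative sketch (hypothetical, not part of the original file): because
# WritableLogger only needs to expose write(), it can stand in for a file-like
# stream expected by third-party code, e.g. a WSGI server's log argument.
#
#     wsgi_log = WritableLogger(getLogger('wsgi'), logging.DEBUG)
#     wsgi_log.write('each write() call becomes one log record')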
class ContextFormatter(logging.Formatter):
"""A context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color'):
if key not in record.__dict__:
record.__dict__[key] = ''
if record.__dict__.get('request_id', None):
self._fmt = CONF.logging_context_format_string
else:
self._fmt = CONF.logging_default_format_string
if (record.levelno == logging.DEBUG and
CONF.logging_debug_format_suffix):
self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record; Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with CONF.logging_exception_prefix."""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
if CONF.logging_exception_prefix.find('%(asctime)') != -1:
record.asctime = self.formatTime(record, self.datefmt)
formatted_lines = []
for line in lines:
pl = CONF.logging_exception_prefix % record.__dict__
fl = '%s%s' % (pl, line)
formatted_lines.append(fl)
return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
LEVEL_COLORS = {
logging.DEBUG: '\033[00;32m', # GREEN
logging.INFO: '\033[00;36m', # CYAN
logging.AUDIT: '\033[01;36m', # BOLD CYAN
logging.WARN: '\033[01;33m', # BOLD YELLOW
logging.ERROR: '\033[01;31m', # BOLD RED
logging.CRITICAL: '\033[01;31m', # BOLD RED
}
def format(self, record):
record.color = self.LEVEL_COLORS[record.levelno]
return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
message = _("Fatal call to deprecated config: %(msg)s")
def __init__(self, msg):
super(Exception, self).__init__(self.message % dict(msg=msg))
| {
"content_hash": "e3f41cb2d4bd1b339f04a95f23c9ca5c",
"timestamp": "",
"source": "github",
"line_count": 539,
"max_line_length": 78,
"avg_line_length": 35.06679035250464,
"alnum_prop": 0.5901275064811385,
"repo_name": "ccrouch/tuskar",
"id": "b29d5b6cf94253e51fb287b1574067f0a01bf5ee",
"size": "19717",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tuskar/openstack/common/log.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
import unittest
import numpy
import chainer
from chainer.backends import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.utils import type_check
@testing.parameterize(*testing.product_dict(
[{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
],
[{'slices': (0, slice(0, 1), numpy.array(-1)), 'b_data': numpy.array([1])},
{'slices': (slice(None), 0, [0, 2]),
'b_data': numpy.random.uniform(size=(4, 2))},
{'slices': ([1, 0], [0, 0], [2, 0]),
'b_data': numpy.random.uniform(size=(2,))},
{'slices': 1, 'b_data': numpy.random.uniform(size=(2, 3))},
{'slices': numpy.array([False, True, False, True]),
'b_data': numpy.random.uniform(size=(2, 2, 3))},
{'slices': [], 'b_data': numpy.empty(shape=(0, 2, 3))},
]
))
class TestScatterAdd(unittest.TestCase):
def setUp(self):
self.shape = (4, 2, 3)
self.a_data = numpy.random.uniform(
-1, 1, self.shape).astype(self.dtype)
self.a_data_original = self.a_data.copy()
self.gy_data = numpy.random.uniform(
-1, 1, self.shape).astype(self.dtype)
self.b_data = self.b_data.astype(self.dtype)
self.gga_data = numpy.random.uniform(
-1, 1, self.a_data.shape).astype(self.dtype)
self.ggb_data = numpy.random.uniform(
-1, 1, self.b_data.shape).astype(self.dtype)
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-4}
self.check_double_backward_options = {'atol': 1e-3, 'rtol': 1e-3}
if self.dtype == numpy.float16:
self.check_backward_options['dtype'] = numpy.float64
self.check_double_backward_options['dtype'] = numpy.float64
def check_forward(self, a_data, b_data):
a = chainer.Variable(a_data)
b = chainer.Variable(b_data)
y = functions.scatter_add(a, self.slices, b)
self.assertEqual(y.data.dtype, self.dtype)
# Test to make sure that the input values are not changed
numpy.testing.assert_equal(cuda.to_cpu(a.data), self.a_data_original)
a_data_copy = cuda.to_cpu(a_data).copy()
numpy.add.at(a_data_copy, self.slices, cuda.to_cpu(b_data))
numpy.testing.assert_equal(a_data_copy, cuda.to_cpu(y.data))
def test_forward_cpu(self):
self.check_forward(self.a_data, self.b_data)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.a_data), cuda.to_gpu(self.b_data))
def check_backward(self, a_data, b_data, y_grad):
def f(a, b):
return functions.scatter_add(a, self.slices, b)
gradient_check.check_backward(
f, (a_data, b_data), y_grad, **self.check_backward_options)
def test_backward_cpu(self):
self.check_backward(self.a_data, self.b_data, self.gy_data)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.a_data), cuda.to_gpu(self.b_data),
cuda.to_gpu(self.gy_data))
def check_double_backward(self, a_data, b_data, y_grad, a_grad_grad,
b_grad_grad):
def f(a, b):
return functions.scatter_add(a, self.slices, b)
gradient_check.check_double_backward(
f, (a_data, b_data), y_grad, (a_grad_grad, b_grad_grad),
**self.check_double_backward_options)
def test_double_backward_cpu(self):
self.check_double_backward(self.a_data, self.b_data, self.gy_data,
self.gga_data, self.ggb_data)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(cuda.to_gpu(self.a_data),
cuda.to_gpu(self.b_data),
cuda.to_gpu(self.gy_data),
cuda.to_gpu(self.gga_data),
cuda.to_gpu(self.ggb_data))
class TestInvalidScatterAdd(unittest.TestCase):
def setUp(self):
self.default_debug = chainer.is_debug()
chainer.set_debug(True)
self.a_data = numpy.random.uniform(-1, 1, (4, 3, 2))
self.b_data = numpy.random.uniform(-1, 1, (2, 2))
def tearDown(self):
chainer.set_debug(self.default_debug)
def test_multiple_ellipsis(self):
with self.assertRaises(ValueError):
functions.scatter_add(
self.a_data, (Ellipsis, Ellipsis), self.b_data)
def test_too_many_indices(self):
with self.assertRaises(type_check.InvalidType):
functions.scatter_add(self.a_data, (0, 0, 0, 0), self.b_data)
def test_requires_broadcasting(self):
with self.assertRaises(ValueError):
functions.scatter_add(self.a_data, slice(0, 2), self.b_data)
testing.run_module(__name__, __file__)
| {
"content_hash": "e9c011f4c564f659d93fe4e86509a000",
"timestamp": "",
"source": "github",
"line_count": 133,
"max_line_length": 79,
"avg_line_length": 37,
"alnum_prop": 0.5895143263564316,
"repo_name": "ktnyt/chainer",
"id": "2905328d888813a0a164c1a7dc17c162e250d184",
"size": "4921",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/functions_tests/array_tests/test_scatter_add.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3368"
},
{
"name": "C",
"bytes": "70"
},
{
"name": "C++",
"bytes": "1440363"
},
{
"name": "CMake",
"bytes": "42822"
},
{
"name": "Cuda",
"bytes": "53858"
},
{
"name": "Dockerfile",
"bytes": "1242"
},
{
"name": "PowerShell",
"bytes": "7197"
},
{
"name": "Python",
"bytes": "5128330"
},
{
"name": "Shell",
"bytes": "19475"
}
],
"symlink_target": ""
} |
__author__ = 'Charlie'
| {
"content_hash": "7d77edd88d970ccc16f9861e9564f484",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 22,
"avg_line_length": 23,
"alnum_prop": 0.5652173913043478,
"repo_name": "shekkizh/TensorflowProjects",
"id": "9eaafa3d0e8020541c3987459efd1bab9646be7f",
"size": "23",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "FaceDetection/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "254367"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import os
from os import path
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
import nib
from nib import Document, Resource, Render
from nib.processor import preprocessors, postprocessors,\
document_processors, resource_processors, markup_processors,\
render_processors
import nib.plugins
class Build(object):
def __init__(self, options):
self.document_path = options['document_path']
self.resource_path = options['resource_path']
self.output_path = options['output_path']
self.options = options
nib.plugins.load(options)
nib.instance(self)
def load(self):
documents = []
for root, dirs, files in os.walk(self.document_path):
for filename in files:
filepath = path.join(root, filename)
try:
print('Reading {}'.format(filepath))
document = Document.from_file(filepath, self.options)
documents.append(document)
except Exception as e:
print('Error while reading {}: {}'.format(filepath, e))
resources = []
for root, dirs, files in os.walk(self.resource_path):
for filename in files:
filepath = path.join(root, filename)
try:
print('Reading {}'.format(filepath))
resource = Resource.from_file(filepath, self.options)
resources.append(resource)
except Exception as e:
print('Error while reading {}: {}'.format(filepath, e))
return documents, resources
def process_documents(self, documents, resources):
# break documents into groups by type
documents_by_group = {}
for document in documents:
group = document.group
if group not in documents_by_group:
documents_by_group[group] = []
documents_by_group[group].append(document)
# process documents by group
documents = []
for group in documents_by_group:
group_documents = documents_by_group[group]
if group in document_processors:
processors = document_processors[group]
for p in processors:
print('Running document processor {}'.format(p))
group_documents, resources = p(self.options).process(group_documents, resources)
completed_documents = []
chained_documents = []
for document in group_documents:
                if group == '' or document.group == group:
completed_documents.append(document)
else:
chained_documents.append(document)
if len(chained_documents):
chained_documents, resources = self.process_documents(chained_documents, resources)
documents.extend(chained_documents)
documents.extend(completed_documents)
return documents, resources
def process_resources(self, documents, resources):
# break resources into groups by extension
resources_by_group = {}
for resource in resources:
group = resource.extension
if group not in resources_by_group:
resources_by_group[group] = []
resources_by_group[group].append(resource)
# process resources by extension
resources = []
for group in resources_by_group:
group_resources = resources_by_group[group]
if group not in resource_processors:
group = ''
if group in resource_processors:
p = resource_processors[group]
print('Running resource processor {}'.format(p))
documents, group_resources = p(self.options).process(documents, group_resources)
completed_resources = []
chained_resources = []
for resource in group_resources:
                if group == '' or resource.extension == group:
completed_resources.append(resource)
else:
chained_resources.append(resource)
if len(chained_resources):
documents, chained_resources = self.process_resources(documents, chained_resources)
resources.extend(chained_resources)
resources.extend(completed_resources)
return documents, resources
def process(self, documents, resources):
# preprocess everything
for p in preprocessors:
print('Running pre-processor {}'.format(p))
documents, resources = p(self.options).process(documents, resources)
documents, resources = self.process_documents(documents, resources)
documents, resources = self.process_resources(documents, resources)
# break documents into groups by extension
documents_by_group = {}
for document in documents:
group = document.extension
if group not in documents_by_group:
documents_by_group[group] = []
documents_by_group[group].append(document)
# render markup for all documents
documents = []
for extension in documents_by_group:
group_documents = documents_by_group[extension]
if extension not in markup_processors:
extension = ''
if extension in markup_processors:
processors = markup_processors[extension]
for p in processors:
print('Running markup processor {}'.format(p))
group_documents, resources = p(self.options).process(group_documents, resources)
documents.extend(group_documents)
# set default document uris
for document in documents:
if document.uri is None:
document.uri = document.path + document.extension
# postprocess everything
for p in postprocessors:
print('Running post-processor {}'.format(p))
documents, resources = p(self.options).process(documents, resources)
# finalize document uris
for document in documents:
document.uri = urljoin(urljoin(self.options['site']['uri'],
self.options['site']['root']),
document.uri)
if not document.get('link'):
document['link'] = document.uri
return documents, resources
def write(self, documents, resources):
render = Render(self.options, documents)
for document in documents:
print('Rendering content {}'.format(document.path))
render.render_content(document)
# pre-render final processing
for p in render_processors:
print('Running render processor {}'.format(p))
documents, resources = p(self.options).process(documents, resources)
for document in documents:
filepath = path.join(self.output_path, document.path)
filepath += document.extension
print('Rendering document {}'.format(filepath))
with open(filepath, 'w') as f:
f.write(render.render_template(document))
for resource in resources:
filepath = path.join(self.output_path, resource.path)
filepath += resource.extension
print('Writing resource {}'.format(filepath))
with open(filepath, 'wb') as f:
f.write(resource.content)
def create_output_hierarchy(self, documents, resources):
hierarchy = set()
for document in documents:
dirname = path.dirname(document.path)
hierarchy.add(path.join(self.output_path, dirname))
for resource in resources:
dirname = path.dirname(resource.path)
hierarchy.add(path.join(self.output_path, dirname))
print('Creating output hierarchy: {}'.format(hierarchy))
for dir in hierarchy:
try:
os.makedirs(dir)
except os.error:
pass
def run(self):
documents, resources = self.load()
documents, resources = self.process(documents, resources)
self.create_output_hierarchy(documents, resources)
self.write(documents, resources)
| {
"content_hash": "50636779330b40df934320ccd6ae1139",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 100,
"avg_line_length": 36.25316455696203,
"alnum_prop": 0.5874068901303539,
"repo_name": "jreese/nib",
"id": "d4018d497f0d5fa53d77575684ffb961ad614965",
"size": "8592",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nib/build.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "341"
},
{
"name": "Python",
"bytes": "69694"
}
],
"symlink_target": ""
} |
import numpy as np
import tensorflow as tf
import lucid.modelzoo.vision_models as models
import lucid.optvis.objectives as objectives
import lucid.optvis.param as param
import lucid.optvis.render as render
from lucid.misc.io import show, load
from lucid.misc.io.showing import _image_url, _display_html
from collections import defaultdict
class Node(object):
def __init__(self, name, op, graph, pretty_name=None):
self.name = name
self.op = op
self.graph = graph
self.pretty_name = pretty_name
def __repr__(self):
return "<%s: %s>" % (self.name, self.op)
@property
def inputs(self):
return self.graph.node_to_inputs[self.name]
@property
def consumers(self):
return self.graph.node_to_consumers[self.name]
def copy(self):
return Node(self.name, self.op, self.graph)
class Graph(object):
def __init__(self):
self.nodes = []
self.name_map = {}
self.node_to_consumers = defaultdict(lambda: [])
self.node_to_inputs = defaultdict(lambda: [])
def add_node(self, node):
self.nodes.append(node)
self.name_map[node.name] = node
def add_edge(self, node1, node2):
node1, node2 = self[node1], self[node2]
self.node_to_consumers[node1.name].append(node2)
self.node_to_inputs[node2.name].append(node1)
def __getitem__(self, index):
if isinstance(index, str):
return self.name_map[index]
elif isinstance(index, Node):
return self.name_map[index.name]
else:
raise Exception("Unsupported index for Graph", type(index) )
def graphviz(self, groups=None):
print("digraph G {")
if groups is not None:
for root, group in groups.items():
print("")
        print("  subgraph cluster_%s {" % root.name.replace("/", "_"))
print((" label = \"%s\"") % (root.pretty_name or root.name))
for node in group:
print((" \"%s\"") % (node.pretty_name or node.name))
print(" }")
for node in self.nodes:
for inp in node.inputs:
        print('  "%s" -> "%s"' % (inp.pretty_name or inp.name, node.pretty_name or node.name))
print("}")
@staticmethod
def from_graphdef(graphdef):
graph = Graph()
for raw_node in graphdef.node:
graph.add_node(Node(raw_node.name, raw_node.op, graph))
for raw_node in graphdef.node:
for raw_inp in raw_node.input:
if raw_inp.startswith('^'): # skip control inputs
continue
raw_inp_name = raw_inp.split(":")[0]
graph.add_edge(raw_inp_name, raw_node.name)
return graph
def filter_graph(graph, keep_nodes, pass_through=True):
new_graph = Graph()
for node in graph.nodes:
if node.name in keep_nodes:
new_node = node.copy()
new_node.graph = new_graph
new_node.subsumed = []
new_graph.add_node(new_node)
def kept_inputs(node):
ret = []
visited = []
def walk(inp):
if inp in visited: return
visited.append(inp)
if inp.name in keep_nodes:
ret.append(inp)
else:
if pass_through:
new_graph[node].subsumed.append(inp.name)
for inp2 in inp.inputs:
walk(inp2)
for inp in node.inputs:
walk(inp)
return ret
for node in graph.nodes:
if node.name in keep_nodes:
for inp in kept_inputs(node):
new_graph.add_edge(inp, node)
return new_graph
standard_include_ops = ["Placeholder", "Relu", "Relu6", "Add", "Split", "Softmax", "Concat", "ConcatV2", "Conv2D", "MaxPool", "AvgPool", "MatMul"] # Conv2D
def filter_graph_ops(graph, include_ops=standard_include_ops):
keep_nodes = [node.name for node in graph.nodes if node.op in include_ops]
return filter_graph(graph, keep_nodes)
def filter_graph_cut_shapes(graph):
keep_nodes = [node.name for node in graph.nodes if node.op != "Shape"]
return filter_graph(graph, keep_nodes, pass_through=False)
def filter_graph_dynamic(graph):
dynamic_nodes = []
def recursive_walk_forward(node):
if node.name in dynamic_nodes: return
dynamic_nodes.append(node.name)
for next in node.consumers:
recursive_walk_forward(next)
recursive_walk_forward(graph.nodes[0])
return filter_graph(graph, dynamic_nodes)
def filter_graph_collapse_sequence(graph, sequence):
exclude_nodes = []
for node in graph.nodes:
remainder = sequence[:]
matches = []
while remainder:
if len(node.consumers) > 1 and len(remainder) > 1:
break
if node.op == remainder[0]:
matches.append(node.name)
node = node.consumers[0]
remainder = remainder[1:]
else:
break
if len(remainder) == 0:
exclude_nodes += matches[:-1]
include_nodes = [node.name for node in graph.nodes
if node.name not in exclude_nodes]
return filter_graph(graph, include_nodes)
def clip_node_names(graph, prefix):
new_graph = Graph()
for node in graph.nodes:
new_node = node.copy()
new_node.graph = new_graph
new_node.subsumed = []
new_graph.add_node(new_node)
for inp in node.inputs:
new_graph.add_edge(inp, new_node)
for node in new_graph.nodes:
if node.name.startswith(prefix):
node.pretty_name = node.name[len(prefix):]
return new_graph
def find_groups(graph):
node_successors = {}
for node in graph.nodes:
node_successors[node.name] = set(node.inputs)
for inp in node.inputs:
node_successors[node.name] |= node_successors[inp.name]
concat_nodes = [node for node in graph.nodes
if node.op in ["Concat", "ConcatV2", "Add"] and len(node.inputs) > 1]
groups = {}
group_children = set()
for root_node in concat_nodes:
branch_heads = root_node.inputs
branch_nodes = [set([node]) | node_successors[node.name] for node in branch_heads]
branch_shared = set.intersection(*branch_nodes)
branch_uniq = set.union(*branch_nodes) - branch_shared
groups[root_node] = set([root_node]) | branch_uniq
group_children |= branch_uniq
for root in list(groups.keys()):
if root in group_children:
del groups[root]
return groups
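# --- Illustrative usage sketch (not part of the original module). ---
# Assumptions: InceptionV1 from lucid's model zoo is used as the example
# model; graphviz() above simply prints DOT source to stdout.
def example_pretty_graph():
  model = models.InceptionV1()
  model.load_graphdef()                      # populate model.graph_def
  graph = Graph.from_graphdef(model.graph_def)
  graph = filter_graph_ops(graph)            # keep convs, relus, pools, concats, ...
  graph = filter_graph_dynamic(graph)        # drop nodes not reachable from the input
  groups = find_groups(graph)                # branches feeding each Concat/Add
  graph.graphviz(groups=groups)              # print DOT source
  return graph, groups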
| {
"content_hash": "4bc6665c064ac4dc2cf7c18828c446ef",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 155,
"avg_line_length": 26.88157894736842,
"alnum_prop": 0.634034915973242,
"repo_name": "tensorflow/lucid",
"id": "38c1a50de195af550d72d52f933f9f409e2bad2b",
"size": "6129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lucid/scratch/pretty_graphs/graph.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "4315"
},
{
"name": "JavaScript",
"bytes": "11689"
},
{
"name": "Jupyter Notebook",
"bytes": "184265269"
},
{
"name": "Python",
"bytes": "478323"
},
{
"name": "Shell",
"bytes": "2438"
}
],
"symlink_target": ""
} |
__author__ = 'SmileyBarry'
import requests
import sys
import time
from .consts import API_CALL_DOCSTRING_TEMPLATE, API_CALL_PARAMETER_TEMPLATE, IPYTHON_PEEVES, IPYTHON_MODE
from .decorators import Singleton, cached_property, INFINITE
from .errors import APIException, APIUnauthorized, APIKeyRequired, APIPrivate, APIConfigurationError
from . import errors
GET = "GET"
POST = "POST"
# A mapping of all types accepted/required by the API to their Python
# equivalents.
APITypes = {'bool': bool,
'int32': int,
'uint32': int,
'uint64': int,
'string': [str],
'rawbinary': [str, bytes]}
if sys.version_info.major < 3:
# Starting with Python 3, "str" means unicode and "unicode" is not defined. It is
# still relevant for Python 2.x, however.
APITypes['string'] += [unicode]
APITypes['rawbinary'] += [buffer]
class APICall(object):
def __init__(self, api_id, parent, method=None):
"""
Create a new APICall instance.
:param api_id: The API's string-based ID. Must start with a letter, as per Python's rules for attributes.
:type api_id: str
:param parent: The APICall parent of this object. If this is a service or interface, an APIInterface instance is
given instead.
:type parent: APICall or APIInterface
:param method: The HTTP method used for calling the API.
:type method: str
:return: A new instance of APICall.
:rtype: APICall
"""
self._api_id = api_id
self._is_registered = False
self._parent = parent
self._method = method
# Cached data.
self._cached_key = None
self._query = ""
# Set an empty documentation for now.
self._api_documentation = ""
@property
def _api_key(self):
"""
Fetch the appropriate API key, if applicable.
If a key is defined in this call's APIInterface "grandparent" (since each APICall has a APICall parent), it is
used and cached by this object indefinitely. (Until destruction)
Otherwise, nothing (None) will be returned.
:return: A Steam Web API key in the form of a string, or None if not available.
:rtype: str or None
"""
if self._cached_key is not None:
return self._cached_key
if self._parent is not None:
self._cached_key = self._parent._api_key
return self._cached_key
# No key is available. (This is OK)
return None
def _build_query(self):
if self._query != "":
return self._query
# Build the query by calling "str" on ourselves, which recursively
# calls "str" on each parent in the chain.
self._query = str(self)
return self._query
def __str__(self):
"""
Generate the function URL.
"""
if isinstance(self._parent, APIInterface):
return self._parent._query_template + self._api_id + '/'
else:
return str(self._parent) + self._api_id + '/'
@cached_property(ttl=INFINITE)
def _full_name(self):
if self._parent is None:
return self._api_id
else:
return self._parent._full_name + '.' + self._api_id
def __repr__(self):
if self._is_registered is True:
# This is a registered, therefore working, API.
note = "(verified)"
else:
note = "(unconfirmed)"
return "<{cls} {full_name} {api_note}>".format(cls=self.__class__.__name__,
full_name=self._full_name,
api_note=note)
def __getattribute__(self, item):
if item.startswith('_'):
# Underscore items are special.
return super(APICall, self).__getattribute__(item)
else:
try:
return super(APICall, self).__getattribute__(item)
except AttributeError:
if IPYTHON_MODE is True:
# We're in IPython. Which means "getdoc()" is also
# automatically used for docstrings!
if item == "getdoc":
return lambda: self._api_documentation
elif item in IPYTHON_PEEVES:
# IPython always looks for this, no matter what (hiding it in __dir__ doesn't work), so this is
# necessary to keep it from constantly making new
# APICall instances. (a significant slowdown)
raise
# Not an expected item, so generate a new APICall!
return APICall(item, self)
def __iter__(self):
return self.__dict__.__iter__()
def _set_documentation(self, docstring):
"""
Set a docstring specific to this instance of APICall, explaining the bound function.
:param docstring: The relevant docstring.
:return: None
"""
self._api_documentation = docstring
def _register(self, apicall_child=None):
"""
Register a child APICall object under the "self._resolved_children" dictionary so it can be used
normally. Used by API function wrappers after they're deemed working.
:param apicall_child: A working APICall object that should be stored as resolved.
:type apicall_child: APICall
"""
if apicall_child is not None:
if apicall_child._api_id in self.__dict__ \
and apicall_child is not self.__dict__[apicall_child._api_id]:
raise KeyError(
"This API ID is already taken by another API function!")
if not isinstance(self._parent, APIInterface):
self._parent._register(self)
else:
self._is_registered = True
if apicall_child is not None:
self.__setattr__(apicall_child._api_id, apicall_child)
apicall_child._is_registered = True
def _convert_arguments(self, kwargs):
"""
Convert the types of given arguments to a call-friendly format. Modifies the given dictionary directly.
:param kwargs: The keyword-arguments dictionary, passed on to the calling function.
:type kwargs: dict
:return: None, as the given dictionary is changed in-place.
:rtype: None
"""
for argument in kwargs:
if issubclass(type(kwargs[argument]), list):
# The API takes multiple values in a "a,b,c" structure, so we
# have to encode it in that way.
kwargs[argument] = ','.join(kwargs[argument])
elif issubclass(type(kwargs[argument]), bool):
# The API treats True/False as 1/0. Convert it.
if kwargs[argument] is True:
kwargs[argument] = 1
else:
kwargs[argument] = 0
def __call__(self, method=GET, **kwargs):
self._convert_arguments(kwargs)
automatic_parsing = True
if "format" in kwargs:
automatic_parsing = False
else:
kwargs["format"] = "json"
if self._api_key is not None:
kwargs["key"] = self._api_key
# Format the final query.
query = str(self)
if self._method is not None:
method = self._method
if method == POST:
response = requests.request(method, query, data=kwargs)
else:
response = requests.request(method, query, params=kwargs)
errors.check(response)
# Store the object for future reference.
if self._is_registered is False:
self._parent._register(self)
if automatic_parsing is True:
response_obj = response.json()
if len(response_obj.keys()) == 1 and 'response' in response_obj:
return APIResponse(response_obj['response'])
else:
return APIResponse(response_obj)
else:
if kwargs["format"] == "json":
return response.json()
else:
return response.content
class APIInterface(object):
def __init__(self, api_key=None, autopopulate=False, strict=False,
api_domain="api.steampowered.com", api_protocol="http", settings=None,
validate_key=False):
"""
Initialize a new APIInterface object. This object defines an API-interacting session, and is used to call
any API functions from standard code.
:param api_key: Your Steam Web API key. Can be left blank, but some APIs will not work.
:type api_key: str
:param autopopulate: Whether the interfaces, services and methods supported by the Steam Web API should be \
auto-populated during initialization.
:type autopopulate: bool
:param strict: Should the interface enforce access only to defined functions, and only as defined. Only \
applicable if :var autopopulate: is True.
:type strict: bool
:param api_domain:
:param settings: A dictionary which defines advanced settings.
:type settings: dict
:param validate_key: Perform a test call to the API with the given key to ensure the key is valid & working.
:return:
"""
if autopopulate is False and strict is True:
raise ValueError(
"\"strict\" is only applicable if \"autopopulate\" is set to True.")
if api_protocol not in ("http", "https"):
raise ValueError(
"\"api_protocol\" must either be \"http\" or \"https\".")
if '/' in api_domain:
raise ValueError(
"\"api_domain\" should only contain the domain name itself, without any paths or queries.")
if issubclass(type(api_key), str) and len(api_key) == 0:
# We were given an empty key (== no key), but the API's equivalent
# of "no key" is None.
api_key = None
if settings is None:
# Working around mutable argument defaults.
settings = dict()
super_self = super(type(self), self)
# Initialization routines must use the original __setattr__ function, because they might collide with the
# overridden "__setattr__", which expects a fully-built instance to
# exist before being called.
def set_attribute(name, value):
return super_self.__setattr__(name, value)
set_attribute('_api_key', api_key)
set_attribute('_strict', strict)
set_attribute('_settings', settings)
query_template = "{proto}://{domain}/".format(
proto=api_protocol, domain=api_domain)
set_attribute('_query_template', query_template)
if autopopulate is True:
# TODO: Autopopulation should be long-term-cached somewhere for
# future use, since it won't change much.
# Regardless of "strict mode", it has to be OFF during
# auto-population.
original_strict_value = self._strict
try:
self.__dict__['_strict'] = False
self._autopopulate_interfaces()
finally:
self.__dict__['_strict'] = original_strict_value
elif validate_key is True:
if api_key is None:
raise ValueError(
'"validate_key" is True, but no key was given.')
# Call "GetSupportedAPIList", which is guaranteed to succeed with
# any valid key. (Or no key)
try:
self.ISteamWebAPIUtil.GetSupportedAPIList.v1(key=self._api_key)
except (APIUnauthorized, APIKeyRequired, APIPrivate):
raise APIConfigurationError("This API key is invalid.")
def _autopopulate_interfaces(self):
# Call the API which returns a list of API Services and Interfaces.
# API definitions describe how the Interfaces and Services are built
# up, including parameter names & types.
api_definition = self.ISteamWebAPIUtil.GetSupportedAPIList.v1(
key=self._api_key)
for interface in api_definition.apilist.interfaces:
interface_object = APICall(interface.name, self)
parameter_description = API_CALL_PARAMETER_TEMPLATE.format(
indent='\t')
for method in interface.methods:
if method.name in interface_object:
base_method_object = interface_object.__getattribute__(
method.name)
else:
base_method_object = APICall(
method.name, interface_object, method.httpmethod)
# API calls have version-specific definitions, so backwards compatibility could be maintained.
# However, the Web API returns versions as integers (1, 2,
# etc.) but accepts them as "v?" (v1, v2, etc.)
method_object = APICall(
'v' + str(method.version), base_method_object, method.httpmethod)
parameters = []
for parameter in method.parameters:
parameter_requirement = "REQUIRED"
if parameter.optional is True:
parameter_requirement = "OPTIONAL"
if 'description' in parameter:
desc = parameter.description
else:
desc = "(no description)"
parameters += [parameter_description.format(requirement=parameter_requirement,
type=parameter.type,
name=parameter.name,
desc=desc)]
# Now build the docstring.
func_docstring = API_CALL_DOCSTRING_TEMPLATE.format(name=method.name,
parameter_list='\n'.join(parameters))
# Set the docstring appropriately
method_object._api_documentation = func_docstring
# Now call the standard registration method.
method_object._register()
# And now, add it to the APIInterface.
setattr(self, interface.name, interface_object)
def __getattr__(self, name):
"""
Creates a new APICall() instance if "strict" is disabled.
:param name: A Service or Interface name.
:return: A Pythonic object used to access the remote Service or Interface. (APICall)
:rtype: APICall
"""
if name.startswith('_'):
return super(type(self), self).__getattribute__(name)
elif name in IPYTHON_PEEVES:
# IPython always looks for this, no matter what (hiding it in __dir__ doesn't work), so this is
# necessary to keep it from constantly making new APICall
# instances. (a significant slowdown)
raise AttributeError()
else:
if self._strict is True:
raise AttributeError("Strict '{cls}' object has no attribute '{attr}'".format(cls=type(self).__name__,
attr=name))
new_service = APICall(name, self)
# Save this service.
self.__dict__[name] = new_service
return new_service
def __setattr__(self, name, value):
if self._strict is True:
raise AttributeError("Cannot set attributes to a strict '{cls}' object.".format(
cls=type(self).__name__))
else:
return super(type(self), self).__setattr__(name, value)
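# Minimal usage sketch for APIInterface (illustration only, not part of the
# original module). The API key is a placeholder and the steamid is the public
# example id from Valve's documentation; both are assumptions. The attribute
# chain below is resolved dynamically by __getattr__, so the endpoint does not
# need to be pre-registered.
def _example_api_interface(api_key):
    api = APIInterface(api_key=api_key)
    # ISteamUser/GetPlayerSummaries/v2 is a standard public Steam Web API endpoint.
    summaries = api.ISteamUser.GetPlayerSummaries.v2(steamids="76561197960435530")
    return [player.personaname for player in summaries.players]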
@Singleton
class APIConnection(object):
QUERY_DOMAIN = "http://api.steampowered.com"
# Use double curly-braces to tell Python that these variables shouldn't be
# expanded yet.
QUERY_TEMPLATE = "{domain}/{{interface}}/{{command}}/{{version}}/".format(
domain=QUERY_DOMAIN)
def __init__(self, api_key=None, settings={}, validate_key=False):
"""
NOTE: APIConnection will soon be made deprecated by APIInterface.
Initialise the main APIConnection. Since APIConnection is a singleton object, any further "initialisations"
will not re-initialise the instance but just retrieve the existing instance. To reassign an API key,
retrieve the Singleton instance and call "reset" with the key.
:param api_key: A Steam Web API key. (Optional, but recommended)
:param settings: A dictionary of advanced tweaks. Beware! (Optional)
precache -- True/False. (Default: True) Decides whether attributes that retrieve
a group of users, such as "friends", should precache player summaries,
like nicknames. Recommended if you plan to use nicknames right away, since
caching is done in groups and retrieving one-by-one takes a while.
:param validate_key: Perform a test call to the API with the given key to ensure the key is valid & working.
"""
self.reset(api_key)
self.precache = True
if 'precache' in settings and issubclass(
type(settings['precache']), bool):
self.precache = settings['precache']
if validate_key:
if api_key is None:
raise ValueError(
'"validate_key" is True, but no key was given.')
# Call "GetSupportedAPIList", which is guaranteed to succeed with
# any valid key. (Or no key)
try:
self.call("ISteamWebAPIUtil", "GetSupportedAPIList", "v1")
except (APIUnauthorized, APIKeyRequired, APIPrivate):
raise APIConfigurationError("This API key is invalid.")
def reset(self, api_key):
self._api_key = api_key
def call(self, interface, command, version, method=GET, **kwargs):
"""
        Call an API command. All keyword arguments past "method" are automatically turned into GET/POST request
        parameters.
:param interface: Interface name that contains the requested command. (E.g.: "ISteamUser")
:param command: A matching command. (E.g.: "GetPlayerSummaries")
:param version: The version of this API you're using. (Usually v000X or vX, with "X" standing in for a number)
:param method: Which HTTP method this call should use. GET by default, but can be overriden to use POST for
POST-exclusive APIs or long parameter lists.
:param kwargs: A bunch of keyword arguments for the call itself. "key" and "format" should NOT be specified.
                       If APIConnection has an associated key, "key" will be overwritten by it, and overriding "format"
cancels out automatic parsing. (The resulting object WILL NOT be an APIResponse but a string.)
:rtype: APIResponse
"""
for argument in kwargs:
if isinstance(kwargs[argument], list):
# The API takes multiple values in a "a,b,c" structure, so we
# have to encode it in that way.
kwargs[argument] = ','.join(kwargs[argument])
elif isinstance(kwargs[argument], bool):
# The API treats True/False as 1/0. Convert it.
if kwargs[argument] is True:
kwargs[argument] = 1
else:
kwargs[argument] = 0
automatic_parsing = True
if "format" in kwargs:
automatic_parsing = False
else:
kwargs["format"] = "json"
if self._api_key is not None:
kwargs["key"] = self._api_key
query = self.QUERY_TEMPLATE.format(
interface=interface, command=command, version=version)
if method == POST:
response = requests.request(method, query, data=kwargs)
else:
response = requests.request(method, query, params=kwargs)
errors.check(response)
if automatic_parsing is True:
response_obj = response.json()
if len(response_obj.keys()) == 1 and 'response' in response_obj:
return APIResponse(response_obj['response'])
else:
return APIResponse(response_obj)
class APIResponse(object):
"""
A dict-proxying object which objectifies API responses for prettier code,
easier prototyping and less meaningless debugging ("Oh, I forgot square brackets.").
Recursively wraps every response given to it, by replacing each 'dict' object with an
APIResponse instance. Other types are safe.
"""
def __init__(self, father_dict):
# Initialize an empty dictionary.
self._real_dictionary = {}
# Recursively wrap the response in APIResponse instances.
for item in father_dict:
if isinstance(father_dict[item], dict):
self._real_dictionary[item] = APIResponse(father_dict[item])
elif isinstance(father_dict[item], list):
self._real_dictionary[item] = APIResponse._wrap_list(
father_dict[item])
else:
self._real_dictionary[item] = father_dict[item]
@staticmethod
def _wrap_list(original_list):
"""
Receives a list of items and recursively wraps any dictionaries inside it as APIResponse
objects. Resolves issue #12.
:param original_list: The original list that needs wrapping.
:type original_list: list
:return: A near-identical list, with "dict" objects replaced into APIResponse ones.
:rtype: list
"""
new_list = []
for item in original_list:
if isinstance(item, dict):
new_list += [APIResponse(item)]
elif isinstance(item, list):
new_list += [APIResponse._wrap_list(item)]
else:
new_list += [item]
return new_list
def __repr__(self):
return dict.__repr__(self._real_dictionary)
@property
def __dict__(self):
return self._real_dictionary
def __getattribute__(self, item):
if item.startswith("_"):
return super(APIResponse, self).__getattribute__(item)
else:
if item in self._real_dictionary:
return self._real_dictionary[item]
else:
raise AttributeError("'{cls}' has no attribute '{attr}'".format(cls=type(self).__name__,
attr=item))
def __getitem__(self, item):
return self._real_dictionary[item]
def __iter__(self):
return self._real_dictionary.__iter__()
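# Sketch of the older APIConnection/APIResponse flow (illustration only; the
# key and steamid are placeholder assumptions). APIConnection is a singleton,
# so the first construction fixes the API key, and call() wraps the decoded
# JSON in APIResponse so nested dictionaries become attribute lookups.
def _example_api_connection(api_key):
    connection = APIConnection(api_key=api_key)
    result = connection.call("ISteamUser", "GetPlayerSummaries", "v0002",
                             steamids="76561197960435530")
    return result.players[0].personaname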
class SteamObject(object):
"""
A base class for all rich Steam objects. (SteamUser, SteamApp, etc.)
"""
@property
def id(self):
return self._id # "_id" is set by the child class.
def __repr__(self):
try:
return '<{clsname} "{name}" ({id})>'.format(clsname=self.__class__.__name__,
name=_shims.sanitize_for_console(
self.name),
id=self._id)
except (AttributeError, APIException):
return '<{clsname} ({id})>'.format(
clsname=self.__class__.__name__, id=self._id)
def __eq__(self, other):
"""
:type other: SteamObject
"""
# Use a "hash" of each object to prevent cases where derivative classes sharing the
# same ID, like a user and an app, would cause a match if compared
# using ".id".
return hash(self) == hash(other)
def __ne__(self, other):
"""
:type other: SteamObject
"""
return not self == other
def __hash__(self):
return hash(self.id)
def store(obj, property_name, data, received_time=0):
"""
Store data inside the cache of a cache-enabled object. Mainly used for pre-caching.
:param obj: The target object.
:type obj: SteamObject
:param property_name: The destination property's name.
:param data: The data that we need to store inside the object's cache.
:type data: object
:param received_time: The time this data was retrieved. Used for the property cache.
Set to 0 to use the current time.
:type received_time: float
"""
if received_time == 0:
received_time = time.time()
# Just making sure caching is supported for this object...
if issubclass(type(obj), SteamObject) or hasattr(obj, "_cache"):
obj._cache[property_name] = (data, received_time)
else:
raise TypeError(
"This object type either doesn't visibly support caching, or has yet to initialise its cache.")
def expire(obj, property_name):
"""
Expire a cached property
:param obj: The target object.
:type obj: SteamObject
:param property_name:
:type property_name:
"""
if issubclass(type(obj), SteamObject) or hasattr(obj, "_cache"):
del obj._cache[property_name]
else:
raise TypeError(
"This object type either doesn't visibly support caching, or has yet to initialise its cache.")
def chunker(seq, size):
"""
    Turn an iterable into an iterable of iterables of the given size
:param seq: The target iterable
:type seq: iterable
:param size: The max size of the resulting batches
:type size: int
:rtype: iterable
"""
return (seq[pos:pos + size] for pos in range(0, len(seq), size))
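# Tiny illustration of chunker (assumed example values): Steam APIs cap batch
# sizes, so a list of ids is consumed in fixed-size slices.
def _example_chunker():
    batches = list(chunker([1, 2, 3, 4, 5], 2))
    # batches == [[1, 2], [3, 4], [5]]
    return batches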
class _shims:
"""
A collection of functions used at junction points where a Python 3.x solution potentially degrades functionality
or performance on Python 2.x.
"""
class Python2:
@staticmethod
def sanitize_for_console(string):
"""
            Sanitize a string for console presentation. On Python 2, it encodes the Unicode string back to ASCII,
            dropping non-ASCII characters.
"""
return string.encode(errors="ignore")
class Python3:
@staticmethod
def sanitize_for_console(string):
"""
Sanitize a string for console presentation. Does nothing on Python 3.
"""
return string
if sys.version_info.major >= 3:
sanitize_for_console = Python3.sanitize_for_console
else:
sanitize_for_console = Python2.sanitize_for_console
sanitize_for_console = staticmethod(sanitize_for_console)
| {
"content_hash": "731e3f03077bf1db9b399aca02931031",
"timestamp": "",
"source": "github",
"line_count": 685,
"max_line_length": 120,
"avg_line_length": 39.465693430656934,
"alnum_prop": 0.5747207220537102,
"repo_name": "smiley/steamapi",
"id": "3653b32f0991d35061c1b0a2457a6d7c453baa97",
"size": "27034",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "steamapi/core.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "61641"
}
],
"symlink_target": ""
} |
__author__ = 'Georgios Rizos ([email protected])'
import itertools
from itertools import islice, zip_longest
import numpy as np
def grouper(iterable, n, pad_value=None):
"""
Returns a generator of n-length chunks of an input iterable, with appropriate padding at the end.
Example: grouper(3, 'abcdefg', 'x') --> ('a','b','c'), ('d','e','f'), ('g','x','x')
Inputs: - iterable: The source iterable that needs to be chunkified.
- n: The size of the chunks.
- pad_value: The value with which the last chunk will be padded.
Output: - chunk_gen: A generator of n-length chunks of an input iterable.
"""
chunk_gen = (chunk for chunk in zip_longest(*[iter(iterable)]*n, fillvalue=pad_value))
return chunk_gen
def chunks(iterable, n):
"""
    A python generator that yields n-length sub-list chunks.
    Input:  - iterable: The input list that is to be separated into chunks of size n.
            - n: The chunk size (e.g. 100, which the Twitter API expects).
    Yields: - sub_list: List chunks of length at most n.
"""
for i in np.arange(0, len(iterable), n):
yield iterable[i:i+n]
def split_every(iterable, n): # TODO: Remove this, or make it return a generator.
"""
A generator of n-length chunks of an input iterable
"""
i = iter(iterable)
piece = list(islice(i, n))
while piece:
yield piece
piece = list(islice(i, n))
def parallel_chunks(l, n):
for thread_id in range(n):
yield roundrobin_chunks(l, n, thread_id)
def roundrobin_chunks(l, n, id):
l_c = iter(l)
x = list(itertools.islice(l_c, id, None, n))
if len(x):
return x
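# Small sketch (assumed values) of how the round-robin helpers split work
# across workers: with two workers, worker 0 receives the even-indexed items
# and worker 1 the odd-indexed ones.
def example_parallel_chunks():
    workers = list(parallel_chunks(["a", "b", "c", "d", "e"], 2))
    # workers == [["a", "c", "e"], ["b", "d"]]
    return workers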
| {
"content_hash": "6336a41923afb49efd1d975e48f3856f",
"timestamp": "",
"source": "github",
"line_count": 58,
"max_line_length": 101,
"avg_line_length": 29.086206896551722,
"alnum_prop": 0.6271487848251334,
"repo_name": "MKLab-ITI/reveal-user-annotation",
"id": "079834fba7cd7698952619a941be14233716ea4d",
"size": "1687",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reveal_user_annotation/text/map_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "129415"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function, absolute_import
default_app_config = "reviewers.apps.ReviewersConfig"
review_group_name = "reviewers"
| {
"content_hash": "e355aa163ecfe9eb909376e7068c7e6b",
"timestamp": "",
"source": "github",
"line_count": 6,
"max_line_length": 72,
"avg_line_length": 27,
"alnum_prop": 0.7777777777777778,
"repo_name": "olea/PyConES-2016",
"id": "94a81b72a5bcc33fc693f32a5dc59410c6be8c15",
"size": "186",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "pycones/reviewers/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "142786"
},
{
"name": "HTML",
"bytes": "88272"
},
{
"name": "JavaScript",
"bytes": "677"
},
{
"name": "Python",
"bytes": "304958"
},
{
"name": "Shell",
"bytes": "488"
}
],
"symlink_target": ""
} |
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/structure/city/shared_garden_small.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | {
"content_hash": "e930d082fd0089b0541f27bbd95885c5",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 82,
"avg_line_length": 23.923076923076923,
"alnum_prop": 0.6977491961414791,
"repo_name": "anhstudios/swganh",
"id": "c117786085a1e00d99a8b3d78e8313ffd5021be9",
"size": "456",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "data/scripts/templates/object/draft_schematic/structure/city/shared_garden_small.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "11887"
},
{
"name": "C",
"bytes": "7699"
},
{
"name": "C++",
"bytes": "2357839"
},
{
"name": "CMake",
"bytes": "41264"
},
{
"name": "PLSQL",
"bytes": "42065"
},
{
"name": "Python",
"bytes": "7503510"
},
{
"name": "SQLPL",
"bytes": "42770"
}
],
"symlink_target": ""
} |
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.insert(0, os.path.abspath('../../'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.todo',
'sphinx.ext.coverage',
'sphinx.ext.viewcode',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Gmail Client'
copyright = u'2014, Wilberto Morales'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.4'
# The full version, including alpha/beta/rc tags.
release = '0.0.4'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# use readthedocs awesome theme locally
# on_rtd is whether we are on readthedocs.org
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only import and set the theme if we're building docs locally
import sphinx_rtd_theme
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
#html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
#html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
#html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'GmailClientdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'GmailClient.tex', u'Gmail Client Documentation',
u'Wilberto Morales', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'gmailclient', u'Gmail Client Documentation',
[u'Wilberto Morales'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'GmailClient', u'Gmail Client Documentation',
u'Wilberto Morales', 'GmailClient', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| {
"content_hash": "5dced5f1a515b1ea4224ab40357c32e2",
"timestamp": "",
"source": "github",
"line_count": 277,
"max_line_length": 79,
"avg_line_length": 32.64259927797834,
"alnum_prop": 0.7039371820393718,
"repo_name": "bepetersn/gmail_client",
"id": "f53b65772e7010f25a5c3a8074106b53c10f09c6",
"size": "9467",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "docs/source/conf.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "30233"
}
],
"symlink_target": ""
} |
'''
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
os.system("cqlsh -k titan -e 'drop keyspace titan;'")
os.system(os.environ['TITAN_HOME'] + "/bin/gremlin.sh set_schema.groovy")
| {
"content_hash": "f813fda87bfd19382c752d39bfbeff5a",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 73,
"avg_line_length": 37.333333333333336,
"alnum_prop": 0.7648809523809523,
"repo_name": "akon-dey/ground",
"id": "35c825fd435fe926eb0c67e776ab99f5151aa0be",
"size": "672",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "ground-core/scripts/gremlin/titan_setup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Groovy",
"bytes": "2460"
},
{
"name": "Java",
"bytes": "715880"
},
{
"name": "PLpgSQL",
"bytes": "4463"
},
{
"name": "Python",
"bytes": "8662"
},
{
"name": "Shell",
"bytes": "12225"
}
],
"symlink_target": ""
} |
from pymongo import MongoClient
from bson.objectid import ObjectId
import os
import json
DATABASE_URI = os.getenv('MONGOLAB_URI')
database = MongoClient(DATABASE_URI)
database.drop_database(database.get_default_database())
db = database.get_default_database()
os.chdir('..')
basedir = os.path.abspath(os.curdir) + '/data/'
def parse_json(filename, collectionname):
with open(filename, 'r') as f:
parsed = json.loads(f.read())
for record in parsed:
try:
x = str(record['_id']['$oid'])
del record['_id']['$oid']
record['_id'] = ObjectId(x)
except TypeError:
pass
collectionname.insert(record, check_keys=False)
parse_json(basedir + 'answer.json', db.answers)
parse_json(basedir + 'question.json', db.questions)
parse_json(basedir + 'user.json', db.users)
| {
"content_hash": "40cee27123e2bcc965a0dfad76258e83",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 55,
"avg_line_length": 27.35483870967742,
"alnum_prop": 0.652122641509434,
"repo_name": "LNM-HoverSpace/HoverSpace",
"id": "03bf60e3226454f81b4d6ed388d3ba152d42e88f",
"size": "848",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "scripts/populateDB.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "20801"
},
{
"name": "HTML",
"bytes": "22234"
},
{
"name": "JavaScript",
"bytes": "4606"
},
{
"name": "Python",
"bytes": "42898"
}
],
"symlink_target": ""
} |
from pytest import raises, skip
from decimal import Decimal
from mio import runtime
from mio.utils import Null
from mio.state import Completer
from mio.errors import AttributeError, Error
# Supported Types
# {
# dict: "Dict"
# list: "List"
# tuple: "Tuple",
# unicode: "String"
# str: "Bytes",
# bool: "Boolean"
# Decimal: "Number"
# }
class Foo(object):
"""Foo Class
    mio does not support coercing Python user classes, methods or
    objects. Trying to convert these to mio with ``runtime.state.tomio(...)``
    will fail; if a ``default`` value is passed, that value is returned instead.
"""
def foo(self):
"""foo method"""
def test_tomio_class(mio):
assert runtime.state.tomio(Foo, Null) is Null
def test_tomio_object(mio):
foo = Foo()
assert runtime.state.tomio(foo, Null) is Null
def test_tomio_method(mio):
foo = Foo()
assert runtime.state.tomio(foo.foo, Null) is Null
def test_frommio_Number(mio):
assert runtime.state.frommio(mio.eval("1.0")) == Decimal(1.0)
def test_tomio_Number(mio):
assert runtime.state.tomio(1.0) == mio.eval("1.0")
def test_frommio_Boolean(mio):
assert runtime.state.frommio(mio.eval("True")) is True
def test_tomio_Boolean(mio):
# XXX: FIXME: This should be the same identity
assert runtime.state.tomio(True) == mio.eval("True")
def test_frommio_String(mio):
assert runtime.state.frommio(mio.eval("String clone()")) == ""
def test_tomio_String(mio):
assert runtime.state.tomio("") == mio.eval("String clone()")
def test_frommio_List(mio):
assert runtime.state.frommio(mio.eval("List clone()")) == []
def test_tomio_List(mio):
assert runtime.state.tomio([]) == mio.eval("List clone()")
def test_frommio_Tuple(mio):
assert runtime.state.frommio(mio.eval("Tuple clone()")) == ()
def test_tomio_Tuple(mio):
assert runtime.state.tomio(()) == mio.eval("Tuple clone()")
def test_frommio_Dict(mio):
assert runtime.state.frommio(mio.eval("Dict clone()")) == {}
def test_tomio_Dict(mio):
assert runtime.state.tomio({}) == mio.eval("Dict clone()")
def test_tomio_default(mio):
assert runtime.state.frommio(runtime.state.tomio(type(None))) is None
def test_error(mio, capfd):
with raises(AttributeError):
mio.eval("foobar()", reraise=True)
out, err = capfd.readouterr()
assert out == "\n AttributeError: Object has no attribute 'foobar'\n ---------------\n foobar\n\n"
def test_usererror(mio, capfd):
with raises(Error):
mio.eval("raise TypeError", reraise=True)
out, err = capfd.readouterr()
assert out == "\n TypeError: \n ----------\n raise(TypeError)\n\n"
def test_runsource(mio):
assert runtime.state.runsource("(1 + 2")
def test_runsource2(mio, capfd):
assert runtime.state.runsource("(1 + 2)") is None
out, err = capfd.readouterr()
assert out == "===> 3\n"
def test_completer(mio):
skip("XXX: Broken")
completer = Completer(mio)
assert completer.complete("", 0) == "Core"
def test_completer2(mio):
skip("XXX: Broken")
completer = Completer(mio)
assert completer.complete("Root ", 0) == "Root Core"
def test_completer3(mio):
skip("XXX: Broken")
completer = Completer(mio)
assert completer.complete("Root bu", 0) == "Root builtins"
def test_completer4(mio):
completer = Completer(mio)
assert completer.complete("Root asdf ", 0) is None
def test_completer5(mio):
skip("XXX: Broken")
completer = Completer(mio)
assert completer.complete("", 0) == "Core"
assert completer.complete("", len(completer.matches)) is None
def test_completer6(mio, capfd):
completer = Completer(mio)
completer.display_matches("Root", ["Root Core", "Root Types"], 10)
out, err = capfd.readouterr()
assert out == "\n Core Types \nmio> "
def test_completer7(mio, capfd):
completer = Completer(mio)
completer.display_matches("Root", ["Root Core"] * 100, 9)
out, err = capfd.readouterr()
assert set(out.split()[:-1]) == set(["Core"])
| {
"content_hash": "ea45296231de44458345c9fb6ee5b07f",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 105,
"avg_line_length": 22.29891304347826,
"alnum_prop": 0.6417255666585425,
"repo_name": "prologic/mio",
"id": "2fd7046c6eef2d729d88ee2af55e99657d940542",
"size": "4103",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_state.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "15260"
},
{
"name": "Python",
"bytes": "191874"
},
{
"name": "Shell",
"bytes": "5303"
},
{
"name": "VimL",
"bytes": "1477"
}
],
"symlink_target": ""
} |
"""Ce fichier définit un conteneur de groupe. Il doit n'y voir qu'un conteneur
de groupes et c'est de ce fait à la fois une classe singleton implicite
dérivée de BaseObj.
"""
from abstraits.obase import BaseObj
from primaires.interpreteur.groupe.groupe import *
class ConteneurGroupes(BaseObj):
"""Classe conteneur des groupes.
Elle peut être soit créée directement par le système si le fichier
n'existe pas, soit récupérée depuis son fichier de sauvegarde.
"""
_nom = "groupes_commandes"
_version = 1
enregistrer = True
def __init__(self):
"""Constructeur du conteneur."""
BaseObj.__init__(self)
        self._groupes = {} # group name: group
self._construire()
        # Dictionary mapping a command address to a group
self.commandes = {}
def __getnewargs__(self):
return ()
def __contains__(self, nom_groupe):
"""Retourne True si le groupe est dans le dictionnaire, False sinon"""
return nom_groupe in self._groupes.keys()
def __getitem__(self, nom_groupe):
"""Retourne le groupe avec le nom spécifié"""
return self._groupes[nom_groupe]
def __len__(self):
"""Retourne le nombre de groupes"""
return len(self._groupes)
@property
def nom_groupes(self):
"""Retourne une liste des noms des groupes existants."""
return sorted([g.nom for g in self._groupes.values()])
def ajouter_groupe(self, nom_groupe, flags=AUCUN):
"""Méthode appelée pour ajouter un groupe.
L'objet Groupe est créé "à la volée" et est retourné par la méthode si
l'on désire le manipuler directement.
"""
groupe = Groupe(self, nom_groupe, flags)
self._groupes[nom_groupe] = groupe
return groupe
def supprimer_groupe(self, nom_groupe):
"""Supprime le groupe nom_groupe"""
del self._groupes[nom_groupe]
def ajouter_commande(self, commande):
"""Ajout de 'commande' dans son groupe"""
if not commande.adresse in self.commandes.keys():
groupe = self[commande.groupe]
self.commandes[commande.adresse] = groupe
def supprimer_commande(self, commande):
"""On supprime la commande 'commande'.
"""
del self.commandes[commande.adresse]
def changer_groupe_commande(self, chemin, nom_groupe):
"""Change le groupe d'une commande.
"""
nouveau_groupe = self[nom_groupe]
self.commandes[chemin] = nouveau_groupe
def personnage_a_le_droit(self, personnage, commande):
"""Le personnage a-t-il le droit d'appeler 'commande' ?"""
if personnage.nom_groupe in self:
groupe_png = self[personnage.nom_groupe]
else:
groupe_png = self["pnj"] # droits minimums
groupe_cmd = self.commandes[commande.adresse]
return self.explorer_groupes_inclus(groupe_png, groupe_cmd.nom)
def explorer_groupes_inclus(self, groupe_base, cherche):
"""Explore les groupes inclus de 'groupe_base', récursivement.
Si le groupe groupe_base ou l'un des groupes inclus a pour nom
'cherche', retourne True, False sinon.
"""
trouve = False
if cherche == groupe_base.nom:
trouve = True
else:
for nom_groupe in groupe_base.groupes_inclus:
if nom_groupe == cherche:
trouve = True
break
r_groupe = self[nom_groupe]
trouve = self.explorer_groupes_inclus(r_groupe, cherche)
if trouve:
break
return trouve
| {
"content_hash": "6c9f321aa2ec3bd32acf6a4ae7ed341e",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 78,
"avg_line_length": 32,
"alnum_prop": 0.6125,
"repo_name": "vlegoff/tsunami",
"id": "5cfa77ff9f6d49bb9bb54ebcafcf992153e772a8",
"size": "5271",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/interpreteur/groupe/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7930908"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
"""
SESMailer
"""
from setuptools import setup, find_packages
import ses_mailer
PACKAGE = ses_mailer
setup(
name=PACKAGE.__NAME__,
version=PACKAGE.__version__,
license=PACKAGE.__license__,
author=PACKAGE.__author__,
author_email='[email protected]',
description="A simple module to send email via AWS SES",
long_description=PACKAGE.__doc__,
url='http://github.com/mardix/ses-mailer/',
download_url='http://github.com/mardix/ses-mailer/tarball/master',
py_modules=['ses_mailer'],
include_package_data=True,
install_requires=[
"boto",
"jinja2"
],
keywords=['email',
'flask',
'aws ses',
'amazon',
'ses',
'mailer',
'jinja2',
'template email'],
platforms='any',
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
packages=find_packages(exclude=["test_config.py"]),
zip_safe=False
)
| {
"content_hash": "53ff26566942f0ee79d5c2bd03e5f283",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 70,
"avg_line_length": 28.92452830188679,
"alnum_prop": 0.5727332028701891,
"repo_name": "mardix/ses-mailer",
"id": "c6a63d59d78dee11669a5523aebd9d726a6c142e",
"size": "1533",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13293"
}
],
"symlink_target": ""
} |
from discord import Embed, Color
description = "getting IDs"
async def ex(message, client):
args = message.content.split()[1:]
server = message.server
print(len(args))
if len(args) > 0:
query = " ".join(args).lower()
roles = ["%s - %s" % (r.name, r.id) for r in server.roles if query in r.name.lower()]
chans = ["%s - %s" % (c.name, c.id) for c in server.channels if query in c.name.lower()]
users = ["%s - %s" % (u.name, u.id) for u in server.members if query in u.name.lower()]
s_roles = "\n".join(roles) if len(roles) > 0 else "*No roles found*"
s_chans = "\n".join(chans) if len(chans) > 0 else "*No channels found*"
s_users = "\n".join(users) if len(users) > 0 else "*No users found*"
em = Embed(title="Elements found")
em.add_field(name="Roles", value=s_roles, inline=False)
em.add_field(name="Channels", value=s_chans, inline=False)
em.add_field(name="Users", value=s_users, inline=False)
await client.send_message(message.channel, embed=em)
else:
await client.send_message(message.channel, embed=Embed(description="**USAGE:**\n`!id <search string>`", color=Color.red())) | {
"content_hash": "1e979b9b1d5879847612029ac983a723",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 131,
"avg_line_length": 45.25925925925926,
"alnum_prop": 0.5990180032733224,
"repo_name": "zekroTJA/regiusBot",
"id": "30605f67a61ae4a6265ec4a1d7f31012767b9a14",
"size": "1222",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/cmd_getid.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76868"
},
{
"name": "Shell",
"bytes": "12"
}
],
"symlink_target": ""
} |
"""A lexical analyzer class for simple shell-like syntaxes."""
# Module and documentation by Eric S. Raymond, 21 Dec 1998
# Input stacking and error message cleanup added by ESR, March 2000
# push_source() and pop_source() made explicit by ESR, January 2001.
# Posix compliance, split(), string arguments, and
# iterator interface by Gustavo Niemeyer, April 2003.
import os
import re
import sys
from collections import deque
from io import StringIO
__all__ = ["shlex", "split", "quote"]
class shlex:
"A lexical analyzer class for simple shell-like syntaxes."
def __init__(self, instream=None, infile=None, posix=False):
if isinstance(instream, str):
instream = StringIO(instream)
if instream is not None:
self.instream = instream
self.infile = infile
else:
self.instream = sys.stdin
self.infile = None
self.posix = posix
if posix:
self.eof = None
else:
self.eof = ''
self.commenters = '#'
self.wordchars = ('abcdfeghijklmnopqrstuvwxyz'
'ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_')
if self.posix:
self.wordchars += ('ßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ'
'ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞ')
self.whitespace = ' \t\r\n'
self.whitespace_split = False
self.quotes = '\'"'
self.escape = '\\'
self.escapedquotes = '"'
self.state = ' '
self.pushback = deque()
self.lineno = 1
self.debug = 0
self.token = ''
self.filestack = deque()
self.source = None
def push_token(self, tok):
"Push a token onto the stack popped by the get_token method"
if self.debug >= 1:
print("shlex: pushing token " + repr(tok))
self.pushback.appendleft(tok)
def push_source(self, newstream, newfile=None):
"Push an input source onto the lexer's input source stack."
if isinstance(newstream, str):
newstream = StringIO(newstream)
self.filestack.appendleft((self.infile, self.instream, self.lineno))
self.infile = newfile
self.instream = newstream
self.lineno = 1
if self.debug:
if newfile is not None:
print('shlex: pushing to file %s' % (self.infile,))
else:
print('shlex: pushing to stream %s' % (self.instream,))
def pop_source(self):
"Pop the input source stack."
self.instream.close()
(self.infile, self.instream, self.lineno) = self.filestack.popleft()
if self.debug:
print('shlex: popping to %s, line %d' \
% (self.instream, self.lineno))
self.state = ' '
def get_token(self):
"Get a token from the input stream (or from stack if it's nonempty)"
if self.pushback:
tok = self.pushback.popleft()
if self.debug >= 1:
print("shlex: popping token " + repr(tok))
return tok
# No pushback. Get a token.
raw = self.read_token()
# Handle inclusions
if self.source is not None:
while raw == self.source:
spec = self.sourcehook(self.read_token())
if spec:
(newfile, newstream) = spec
self.push_source(newstream, newfile)
raw = self.get_token()
# Maybe we got EOF instead?
while raw == self.eof:
if not self.filestack:
return self.eof
else:
self.pop_source()
raw = self.get_token()
# Neither inclusion nor EOF
if self.debug >= 1:
if raw != self.eof:
print("shlex: token=" + repr(raw))
else:
print("shlex: token=EOF")
return raw
def read_token(self):
quoted = False
escapedstate = ' '
while True:
nextchar = self.instream.read(1)
if nextchar == '\n':
self.lineno = self.lineno + 1
if self.debug >= 3:
print("shlex: in state", repr(self.state), \
"I see character:", repr(nextchar))
if self.state is None:
self.token = '' # past end of file
break
elif self.state == ' ':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in whitespace state")
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars:
self.token = nextchar
self.state = 'a'
elif nextchar in self.quotes:
if not self.posix:
self.token = nextchar
self.state = nextchar
elif self.whitespace_split:
self.token = nextchar
self.state = 'a'
else:
self.token = nextchar
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.state in self.quotes:
quoted = True
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in quotes state")
# XXX what error should be raised here?
raise ValueError("No closing quotation")
if nextchar == self.state:
if not self.posix:
self.token = self.token + nextchar
self.state = ' '
break
else:
self.state = 'a'
elif self.posix and nextchar in self.escape and \
self.state in self.escapedquotes:
escapedstate = self.state
self.state = nextchar
else:
self.token = self.token + nextchar
elif self.state in self.escape:
if not nextchar: # end of file
if self.debug >= 2:
print("shlex: I see EOF in escape state")
# XXX what error should be raised here?
raise ValueError("No escaped character")
# In posix shells, only the quote itself or the escape
# character may be escaped within quotes.
if escapedstate in self.quotes and \
nextchar != self.state and nextchar != escapedstate:
self.token = self.token + self.state
self.token = self.token + nextchar
self.state = escapedstate
elif self.state == 'a':
if not nextchar:
self.state = None # end of file
break
elif nextchar in self.whitespace:
if self.debug >= 2:
print("shlex: I see whitespace in word state")
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif nextchar in self.commenters:
self.instream.readline()
self.lineno = self.lineno + 1
if self.posix:
self.state = ' '
if self.token or (self.posix and quoted):
break # emit current token
else:
continue
elif self.posix and nextchar in self.quotes:
self.state = nextchar
elif self.posix and nextchar in self.escape:
escapedstate = 'a'
self.state = nextchar
elif nextchar in self.wordchars or nextchar in self.quotes \
or self.whitespace_split:
self.token = self.token + nextchar
else:
self.pushback.appendleft(nextchar)
if self.debug >= 2:
print("shlex: I see punctuation in word state")
self.state = ' '
if self.token:
break # emit current token
else:
continue
result = self.token
self.token = ''
if self.posix and not quoted and result == '':
result = None
if self.debug > 1:
if result:
print("shlex: raw token=" + repr(result))
else:
print("shlex: raw token=EOF")
return result
def sourcehook(self, newfile):
"Hook called on a filename to be sourced."
if newfile[0] == '"':
newfile = newfile[1:-1]
# This implements cpp-like semantics for relative-path inclusion.
if isinstance(self.infile, str) and not os.path.isabs(newfile):
newfile = os.path.join(os.path.dirname(self.infile), newfile)
return (newfile, open(newfile, "r"))
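    # Illustration of the relative-path handling above (a sketch, assuming
    # self.infile is "conf/main.conf"): sourcing "extra.conf" opens
    # "conf/extra.conf".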
def error_leader(self, infile=None, lineno=None):
"Emit a C-compiler-like, Emacs-friendly error-message leader."
if infile is None:
infile = self.infile
if lineno is None:
lineno = self.lineno
return "\"%s\", line %d: " % (infile, lineno)
def __iter__(self):
return self
def __next__(self):
token = self.get_token()
if token == self.eof:
raise StopIteration
return token
def split(s, comments=False, posix=True):
lex = shlex(s, posix=posix)
lex.whitespace_split = True
if not comments:
lex.commenters = ''
return list(lex)
_find_unsafe = re.compile(r'[^\w@%+=:,./-]', re.ASCII).search
def quote(s):
"""Return a shell-escaped version of the string *s*."""
if not s:
return "''"
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return "'" + s.replace("'", "'\"'\"'") + "'"
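# A minimal usage sketch for split() and quote(), assuming the POSIX defaults
# above: quote() escapes a string so that split() (and the shell) recover it
# unchanged.
#   split("ls -l 'my file'")     -> ['ls', '-l', 'my file']
#   quote("my file")             -> "'my file'"
#   split(quote("don't panic"))  -> ["don't panic"]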
def _print_tokens(lexer):
while 1:
tt = lexer.get_token()
if not tt:
break
print("Token: " + repr(tt))
if __name__ == '__main__':
if len(sys.argv) == 1:
_print_tokens(shlex())
else:
fn = sys.argv[1]
with open(fn) as f:
_print_tokens(shlex(f, fn))
| {
"content_hash": "90cc7a13bc932bfc7174b4636829cc90",
"timestamp": "",
"source": "github",
"line_count": 303,
"max_line_length": 76,
"avg_line_length": 37.48844884488449,
"alnum_prop": 0.4921207852803944,
"repo_name": "MalloyPower/parsing-python",
"id": "f08391800b14f62dfeae12ff721ad4ba1b418d52",
"size": "11421",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "front-end/testsuite-python-lib/Python-3.5.0/Lib/shlex.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "1963"
},
{
"name": "Lex",
"bytes": "238458"
},
{
"name": "Makefile",
"bytes": "4513"
},
{
"name": "OCaml",
"bytes": "412695"
},
{
"name": "Python",
"bytes": "17319"
},
{
"name": "Rascal",
"bytes": "523063"
},
{
"name": "Yacc",
"bytes": "429659"
}
],
"symlink_target": ""
} |
import logging
class NewStyleLogMessage(object):
def __init__(self, message, *args, **kwargs):
self.message = message
self.args = args
self.kwargs = kwargs
def __str__(self):
args = (i() if callable(i) else i for i in self.args)
kwargs = dict((k, v() if callable(v) else v)
for k, v in self.kwargs.items())
return self.message.format(*args, **kwargs)
N = NewStyleLogMessage
class StyleAdapter(logging.LoggerAdapter):
def __init__(self, logger, extra=None):
super(StyleAdapter, self).__init__(logger, extra or {})
def log(self, level, msg, *args, **kwargs):
if self.isEnabledFor(level):
msg, log_kwargs = self.process(msg, kwargs)
self.logger._log(level, N(msg, *args, **kwargs), (),
**log_kwargs)
logger = StyleAdapter(logging.getLogger("project"))
# Emits "Lazily formatted log entry: 123 foo" in log
# logger.debug('Lazily formatted entry: {0} {keyword}', 123, keyword='foo')
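# The positional and keyword arguments may also be callables; NewStyleLogMessage
# only invokes them when the record is actually formatted, so expensive values
# can be deferred. A sketch, assuming a hypothetical expensive_summary() helper:
# logger.debug('State dump: {0}', lambda: expensive_summary())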
| {
"content_hash": "52c22ef357b66e801d4e924cef56c3d0",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 75,
"avg_line_length": 30.914285714285715,
"alnum_prop": 0.5730129390018485,
"repo_name": "poffey21/edge",
"id": "3b954ed7ab1cd61b6a34e7b648f1501a181848cb",
"size": "1818",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "src/project_name/logger.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "69"
},
{
"name": "CSS",
"bytes": "216"
},
{
"name": "HTML",
"bytes": "12000"
},
{
"name": "PowerShell",
"bytes": "207"
},
{
"name": "Python",
"bytes": "43508"
}
],
"symlink_target": ""
} |
from django.utils.translation import ugettext_lazy as _
import horizon
class NGVolumes(horizon.Panel):
name = _("Volumes")
slug = 'ngvolumes'
permissions = ('openstack.services.volume',)
| {
"content_hash": "37af6c9b3bb10737169b604ccd8b262a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 55,
"avg_line_length": 22.444444444444443,
"alnum_prop": 0.7079207920792079,
"repo_name": "wolverineav/horizon",
"id": "1ab2c7619b43f7c1b6a75326e3483af03522784d",
"size": "796",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/ngvolumes/panel.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "182861"
},
{
"name": "HTML",
"bytes": "547294"
},
{
"name": "JavaScript",
"bytes": "1954942"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "5103444"
},
{
"name": "Shell",
"bytes": "19593"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, unicode_literals
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
PROJECT_NAME = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
os.environ.setdefault('DJANGO_SETTINGS_MODULE', '{}.settings'.format(PROJECT_NAME))
application = get_wsgi_application()
application = DjangoWhiteNoise(application)
| {
"content_hash": "41970580d0a968e457e4bbb6d8e37364",
"timestamp": "",
"source": "github",
"line_count": 12,
"max_line_length": 83,
"avg_line_length": 34.583333333333336,
"alnum_prop": 0.7759036144578313,
"repo_name": "theeluwin/mashiro",
"id": "1c5739ade96482213b6b81796f72f710a289525a",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project_name/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2298"
},
{
"name": "HTML",
"bytes": "3622"
},
{
"name": "JavaScript",
"bytes": "4695"
},
{
"name": "Python",
"bytes": "14690"
}
],
"symlink_target": ""
} |
from __future__ import print_function, absolute_import, division, unicode_literals
# TEST_UNICODE_LITERALS
import pytest
import os
from astropy.table import Table
from frb.frbcat import FRBCat
#def data_path(filename):
# data_dir = os.path.join(os.path.dirname(__file__), 'files')
# return os.path.join(data_dir, filename)
def test_init():
frbobs = FRBCat()
assert isinstance(frbobs.frbcat, Table)
assert isinstance(frbobs.uniq_frb, Table)
# Specify file
#frbobs2 = FRBCat(frbcat_file='frbcat_2017-04-06.csv')
frbobs2 = FRBCat(frbcat_file='frbcat_20210309.csv')
assert len(frbobs2.frbcat) == 118
assert len(frbobs2.uniq_frb) == 118
| {
"content_hash": "9a3623bf0f8f60f2e35010b6ca25f57a",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 82,
"avg_line_length": 27.08,
"alnum_prop": 0.7060561299852289,
"repo_name": "FRBs/FRB",
"id": "69921d24077d7f99e9d58ccfeb38b9c55bf926c7",
"size": "723",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "frb/tests/test_frbobs.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "2903347"
},
{
"name": "Python",
"bytes": "793709"
}
],
"symlink_target": ""
} |
from __future__ import division
import time
import numpy as np
import tensorflow as tf
from neupy import layers
from neupy.core.properties import (
FunctionWithOptionsProperty,
ScalarVariableProperty,
IntProperty, Property,
)
from neupy.utils import (
AttributeKeyDict, format_data,
as_tuple, iters, tf_utils,
)
from neupy.algorithms.gd import objectives
from neupy.exceptions import InvalidConnection
from neupy.algorithms.base import BaseNetwork
__all__ = ('BaseOptimizer', 'GradientDescent')
class BaseOptimizer(BaseNetwork):
"""
Gradient descent algorithm.
Parameters
----------
network : list, tuple or LayerConnection instance
Network's architecture. There are a few ways
to define it.
- List of layers.
For instance, ``[Input(2), Tanh(4), Relu(1)]``.
- Constructed layers.
For instance, ``Input(2) >> Tanh(4) >> Relu(1)``.
regularizer : function or None
Network's regularizer.
loss : str or function
Error/loss function. Defaults to ``mse``.
- ``mae`` - Mean Absolute Error.
- ``mse`` - Mean Squared Error.
- ``rmse`` - Root Mean Squared Error.
- ``msle`` - Mean Squared Logarithmic Error.
- ``rmsle`` - Root Mean Squared Logarithmic Error.
- ``categorical_crossentropy`` - Categorical cross entropy.
- ``binary_crossentropy`` - Binary cross entropy.
- ``binary_hinge`` - Binary hinge entropy.
- ``categorical_hinge`` - Categorical hinge entropy.
- Custom function which accepts two mandatory arguments.
The first one is expected value and the second one is
predicted value. Example:
.. code-block:: python
def custom_func(expected, predicted):
return expected - predicted
step : float, Variable
Learning rate, defaults to ``0.1``.
{BaseNetwork.show_epoch}
{BaseNetwork.shuffle_data}
{BaseNetwork.signals}
{BaseNetwork.verbose}
Attributes
----------
{BaseNetwork.Attributes}
Methods
-------
{BaseSkeleton.predict}
train(X_train, y_train, X_test=None, y_test=None, epochs=100)
        Train the network. The ``epochs`` parameter controls how many
        training epochs will be run. The ``X_test`` and ``y_test``
        arguments should both be provided when validation after each
        training epoch is required.
{BaseSkeleton.fit}
"""
step = ScalarVariableProperty(default=0.1)
target = Property(default=None, allow_none=True)
regularizer = Property(default=None, allow_none=True)
loss = FunctionWithOptionsProperty(default='mse', choices={
'mae': objectives.mae,
'mse': objectives.mse,
'rmse': objectives.rmse,
'msle': objectives.msle,
'rmsle': objectives.rmsle,
'binary_crossentropy': objectives.binary_crossentropy,
'categorical_crossentropy': objectives.categorical_crossentropy,
'binary_hinge': objectives.binary_hinge,
'categorical_hinge': objectives.categorical_hinge,
})
def __init__(self, network, options=None, **kwargs):
options = options or kwargs
if isinstance(network, (list, tuple)):
network = layers.join(*network)
self.network = network
if len(self.network.output_layers) != 1:
n_outputs = len(network.output_layers)
raise InvalidConnection(
"Connection should have one output "
"layer, got {}".format(n_outputs))
target = options.get('target')
if target is not None and isinstance(target, (list, tuple)):
options['target'] = tf.placeholder(tf.float32, shape=target)
self.target = self.network.targets
super(BaseOptimizer, self).__init__(**options)
start_init_time = time.time()
self.logs.message(
"TENSORFLOW",
"Initializing Tensorflow variables and functions.")
self.variables = AttributeKeyDict()
self.functions = AttributeKeyDict()
self.network.outputs
self.init_functions()
self.logs.message(
"TENSORFLOW",
"Initialization finished successfully. It took {:.2f} seconds"
"".format(time.time() - start_init_time))
def init_train_updates(self):
raise NotImplementedError()
def init_functions(self):
loss = self.loss(self.target, self.network.outputs)
val_loss = self.loss(self.target, self.network.training_outputs)
if self.regularizer is not None:
loss += self.regularizer(self.network)
self.variables.update(
step=self.step,
loss=loss,
val_loss=val_loss,
)
with tf.name_scope('training-updates'):
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
training_updates = self.init_train_updates()
training_updates.extend(update_ops)
tf_utils.initialize_uninitialized_variables()
with tf.name_scope('optimizer'):
self.functions.update(
predict=tf_utils.function(
inputs=as_tuple(self.network.inputs),
outputs=self.network.outputs,
name='predict'
),
one_training_update=tf_utils.function(
inputs=as_tuple(self.network.inputs, self.target),
outputs=loss,
updates=training_updates,
name='one-update-step'
),
score=tf_utils.function(
inputs=as_tuple(self.network.inputs, self.target),
outputs=val_loss,
name='score'
),
)
def format_input(self, X):
X = as_tuple(X)
X_formatted = []
if len(X) != len(self.network.input_layers):
raise ValueError(
"Number of inputs doesn't match number "
"of input layers in the network.")
for input, input_layer in zip(X, self.network.input_layers):
input_shape = tf.TensorShape(input_layer.input_shape)
is_feature1d = (input_shape.ndims == 2 and input_shape[1] == 1)
formatted_input = format_data(input, is_feature1d=is_feature1d)
if (formatted_input.ndim + 1) == input_shape.ndims:
                # We assume that when one dimension is missing the user
                # wants to propagate a single sample through the network
formatted_input = np.expand_dims(formatted_input, axis=0)
X_formatted.append(formatted_input)
return X_formatted
def format_target(self, y):
output_shape = tf.TensorShape(self.network.output_shape)
is_feature1d = (output_shape.ndims == 2 and output_shape[1] == 1)
formatted_target = format_data(y, is_feature1d=is_feature1d)
if (formatted_target.ndim + 1) == len(output_shape):
            # We assume that when one dimension is missing the user
            # wants to propagate a single sample through the network
formatted_target = np.expand_dims(formatted_target, axis=0)
return formatted_target
def score(self, X, y):
"""
Calculate prediction accuracy for input data.
Parameters
----------
X : array-like
y : array-like
Returns
-------
float
Prediction error.
"""
X = self.format_input(X)
y = self.format_target(y)
return self.functions.score(*as_tuple(X, y))
def predict(self, *X, **kwargs):
"""
Makes a raw prediction.
Parameters
----------
X : array-like
Returns
-------
array-like
"""
default_batch_size = getattr(self, 'batch_size', None)
predict_kwargs = dict(
batch_size=kwargs.pop('batch_size', default_batch_size),
verbose=self.verbose,
)
        # We need to do this check for Python 2 compatibility
if kwargs:
raise TypeError("Unknown arguments: {}".format(kwargs))
return self.network.predict(*self.format_input(X), **predict_kwargs)
def train(self, X_train, y_train, X_test=None, y_test=None,
*args, **kwargs):
is_test_data_partialy_missing = (
(X_test is None and y_test is not None) or
(X_test is not None and y_test is None)
)
if is_test_data_partialy_missing:
raise ValueError(
"Input or target test samples are missed. They "
"must be defined together or none of them.")
X_train = self.format_input(X_train)
y_train = self.format_target(y_train)
if X_test is not None:
X_test = self.format_input(X_test)
y_test = self.format_target(y_test)
return super(BaseOptimizer, self).train(
X_train=X_train, y_train=y_train,
X_test=X_test, y_test=y_test,
*args, **kwargs)
def one_training_update(self, X_train, y_train):
return self.functions.one_training_update(
*as_tuple(X_train, y_train))
def get_params(self, deep=False, with_network=True):
params = super(BaseOptimizer, self).get_params()
if with_network:
params['network'] = self.network
return params
def __reduce__(self):
parameters = self.get_params(with_network=False)
# We only need to know placeholders shape
# in order to be able to reconstruct it
parameters['target'] = tf_utils.shape_to_tuple(
parameters['target'].shape)
args = (self.network, parameters)
return (self.__class__, args)
def __repr__(self):
return "{}({}, {})".format(
self.__class__.__name__,
self.network,
self.repr_options())
class GradientDescent(BaseOptimizer):
"""
Mini-batch Gradient Descent algorithm.
Parameters
----------
batch_size : int or None
        Set up the mini-batch size. The ``None`` value ensures that all
        data samples are propagated through the network at once.
Defaults to ``128``.
{BaseOptimizer.Parameters}
Attributes
----------
{BaseOptimizer.Attributes}
Methods
-------
{BaseOptimizer.Methods}
Examples
--------
>>> import numpy as np
>>> from neupy import algorithms
>>> from neupy.algorithms import *
>>>
>>> x_train = np.array([[1, 2], [3, 4]])
>>> y_train = np.array([[1], [0]])
>>>
>>> network = Input(2) >> Sigmoid(3) >> Sigmoid(1)
>>> optimizer = algorithms.GradientDescent(network, batch_size=1)
>>> optimizer.train(x_train, y_train)
"""
batch_size = IntProperty(default=128, minval=0, allow_none=True)
def init_train_updates(self):
optimizer = tf.train.GradientDescentOptimizer(
learning_rate=self.step,
)
self.functions.optimizer = optimizer
return [optimizer.minimize(self.variables.loss)]
def one_training_update(self, X_train, y_train):
"""
Train one epoch.
Parameters
----------
X_train : array-like
Training input dataset.
y_train : array-like
Training target dataset.
Returns
-------
float
Training error.
"""
return self.functions.one_training_update(
*as_tuple(X_train, y_train))
def score(self, X, y):
"""
Check the prediction error for the specified input samples
and their targets.
Parameters
----------
X : array-like
y : array-like
Returns
-------
float
Prediction error.
"""
X = self.format_input(X)
y = self.format_target(y)
return iters.apply_batches(
function=self.functions.score,
inputs=as_tuple(X, y),
batch_size=self.batch_size,
show_output=True,
show_progressbar=self.logs.enable,
average_outputs=True,
)
| {
"content_hash": "2237582d32fec7b020ddfd0741d8aee0",
"timestamp": "",
"source": "github",
"line_count": 419,
"max_line_length": 76,
"avg_line_length": 29.599045346062052,
"alnum_prop": 0.5748266408643767,
"repo_name": "itdxer/neupy",
"id": "ac1f1315d85faca876a8cb810a4a5356e8c680c8",
"size": "12402",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neupy/algorithms/gd/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "13847"
},
{
"name": "JavaScript",
"bytes": "7460"
},
{
"name": "Python",
"bytes": "16002521"
},
{
"name": "Shell",
"bytes": "434"
}
],
"symlink_target": ""
} |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cfp', '0020_auto_20180507_1305'),
]
operations = [
migrations.AddField(
model_name='paperapplication',
name='grant_email_contact',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='paperapplication',
name='grant_process_data',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='paperapplication',
name='grant_publish_data',
field=models.BooleanField(default=False),
),
migrations.AddField(
model_name='paperapplication',
name='grant_publish_video',
field=models.BooleanField(default=False),
),
migrations.AlterField(
model_name='paperapplication',
name='duration',
field=models.CharField(choices=[('25', '25 Minutes'), ('45', '45 Minutes')], default='25', help_text='What talk duration slot would you like? Take into account that there are only 8 slots for 45 minute talks, and 20 slots for 25 minute talks.', max_length=255, verbose_name='Talk Duration Slot'),
),
]
| {
"content_hash": "970084b7b519bff0d99f9e2960e826e3",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 309,
"avg_line_length": 36.388888888888886,
"alnum_prop": 0.5946564885496183,
"repo_name": "WebCampZg/conference-web",
"id": "371314138b32edba4ef4aa3a41edb03706672604",
"size": "1359",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cfp/migrations/0021_auto_20180517_1211.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "131971"
},
{
"name": "JavaScript",
"bytes": "3928"
},
{
"name": "Makefile",
"bytes": "1944"
},
{
"name": "Python",
"bytes": "268738"
},
{
"name": "SCSS",
"bytes": "41619"
}
],
"symlink_target": ""
} |
from approver.models.models import *
from approver.models.approve_models import *
from approver.models.audit_trail_models import AuditTrail
from approver.models.bridge_models import Registerable
| {
"content_hash": "4ba16c04b73a4426ab5731a5ca76ee03",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 57,
"avg_line_length": 48.75,
"alnum_prop": 0.8461538461538461,
"repo_name": "amberlallen/qipr_approver",
"id": "9672ae1cb5bd2e3edfc1b65627ec9daaaa35d34c",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "qipr_approver/approver/models/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "211172"
},
{
"name": "HTML",
"bytes": "38102"
},
{
"name": "JavaScript",
"bytes": "385413"
},
{
"name": "Python",
"bytes": "133616"
},
{
"name": "Ruby",
"bytes": "911"
},
{
"name": "Shell",
"bytes": "4620"
},
{
"name": "VimL",
"bytes": "1716"
}
],
"symlink_target": ""
} |
import os
import sys
sys.path.append('..')
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "demo.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
| {
"content_hash": "20659f9e77c071fc056f93aec6abdd52",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 68,
"avg_line_length": 22.545454545454547,
"alnum_prop": 0.6935483870967742,
"repo_name": "Alkemic/yaACL",
"id": "c2de76d7d1c39ad5ac9708a482fa6b71f3c3e5b2",
"size": "270",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/manage.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "1373"
},
{
"name": "Python",
"bytes": "18113"
}
],
"symlink_target": ""
} |
from horizon import exceptions
from horizon import forms
from horizon import workflows
from openstack_dashboard.api import nova
from openstack_dashboard.dashboards.project.data_processing.utils \
import neutron_support
import openstack_dashboard.dashboards.project.data_processing.utils. \
workflow_helpers as whelpers
from django.core import urlresolvers
from django.utils.translation import ugettext_lazy as _
from openstack_dashboard.api import sahara as saharaclient
import openstack_dashboard.dashboards.project.data_processing. \
cluster_templates. workflows.create as t_flows
from saharaclient.api import base as api_base
import logging
LOG = logging.getLogger(__name__)
KEYPAIR_IMPORT_URL = "horizon:project:access_and_security:keypairs:import"
BASE_IMAGE_URL = "horizon:project:data_processing.data_image_registry:register"
TEMPLATE_UPLOAD_URL = (
"horizon:project:data_processing.cluster_templates:upload_file")
class SelectPluginAction(t_flows.SelectPluginAction):
class Meta(object):
name = _("Select plugin and hadoop version for cluster")
help_text_template = (
"project/data_processing.clusters/_create_general_help.html")
class SelectPlugin(t_flows.SelectPlugin):
action_class = SelectPluginAction
class CreateCluster(t_flows.CreateClusterTemplate):
slug = "create_cluster"
name = _("Launch Cluster")
success_url = "horizon:project:data_processing.cluster_templates:index"
default_steps = (SelectPlugin,)
class GeneralConfigAction(workflows.Action):
populate_neutron_management_network_choices = \
neutron_support.populate_neutron_management_network_choices
hidden_configure_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_configure_field"}))
hidden_to_delete_field = forms.CharField(
required=False,
widget=forms.HiddenInput(attrs={"class": "hidden_to_delete_field"}))
cluster_name = forms.CharField(label=_("Cluster Name"))
description = forms.CharField(label=_("Description"),
required=False,
widget=forms.Textarea(attrs={'rows': 4}))
cluster_template = forms.DynamicChoiceField(label=_("Cluster Template"),
initial=(None, "None"),
add_item_link=
TEMPLATE_UPLOAD_URL)
image = forms.DynamicChoiceField(label=_("Base Image"),
add_item_link=BASE_IMAGE_URL)
keypair = forms.DynamicChoiceField(
label=_("Keypair"),
required=False,
help_text=_("Which keypair to use for authentication."),
add_item_link=KEYPAIR_IMPORT_URL)
def __init__(self, request, *args, **kwargs):
super(GeneralConfigAction, self).__init__(request, *args, **kwargs)
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
if saharaclient.base.is_service_enabled(request, 'network'):
self.fields["neutron_management_network"] = forms.ChoiceField(
label=_("Neutron Management Network"),
choices=self.populate_neutron_management_network_choices(
request, {})
)
self.fields["plugin_name"] = forms.CharField(
widget=forms.HiddenInput(),
initial=plugin
)
self.fields["hadoop_version"] = forms.CharField(
widget=forms.HiddenInput(),
initial=hadoop_version
)
def populate_image_choices(self, request, context):
try:
all_images = saharaclient.image_list(request)
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
details = saharaclient.plugin_get_version_details(request,
plugin,
hadoop_version)
choices = [(image.id, image.name) for image in all_images
if (set(details.required_image_tags).
issubset(set(image.tags)))]
except Exception:
exceptions.handle(request,
_("Unable to fetch image choices."))
choices = []
if not choices:
choices.append(("", _("No Images Available")))
return choices
def populate_keypair_choices(self, request, context):
try:
keypairs = nova.keypair_list(request)
except Exception:
keypairs = []
exceptions.handle(request,
_("Unable to fetch keypair choices."))
keypair_list = [(kp.name, kp.name) for kp in keypairs]
keypair_list.insert(0, ("", _("No keypair")))
return keypair_list
def populate_cluster_template_choices(self, request, context):
templates = saharaclient.cluster_template_list(request)
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
choices = [(template.id, template.name)
for template in templates
if (template.hadoop_version == hadoop_version and
template.plugin_name == plugin)]
if not choices:
choices.append(("", _("No Templates Available")))
# cluster_template_id comes from cluster templates table, when
# Create Cluster from template is clicked there
selected_template_name = None
resolver_match = urlresolvers.resolve(request.path)
if "cluster_template_name" in resolver_match.kwargs:
selected_template_name = (
resolver_match.kwargs["cluster_template_name"])
if selected_template_name:
for template in templates:
if template.name == selected_template_name:
selected_template_id = template.id
break
else:
selected_template_id = (
request.REQUEST.get("cluster_template_id", None))
for template in templates:
if template.id == selected_template_id:
self.fields['cluster_template'].initial = template.id
return choices
def get_help_text(self):
extra = dict()
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(self.request)
extra["plugin_name"] = plugin
extra["hadoop_version"] = hadoop_version
return super(GeneralConfigAction, self).get_help_text(extra)
def clean(self):
cleaned_data = super(GeneralConfigAction, self).clean()
if cleaned_data.get("hidden_configure_field", None) \
== "create_nodegroup":
self._errors = dict()
return cleaned_data
class Meta(object):
name = _("Configure Cluster")
help_text_template = \
("project/data_processing.clusters/_configure_general_help.html")
class GeneralConfig(workflows.Step):
action_class = GeneralConfigAction
contributes = ("hidden_configure_field", )
def contribute(self, data, context):
for k, v in data.items():
context["general_" + k] = v
return context
class ConfigureCluster(whelpers.StatusFormatMixin, workflows.Workflow):
slug = "configure_cluster"
name = _("Launch Cluster")
finalize_button_name = _("Launch")
success_message = _("Launched Cluster %s")
name_property = "general_cluster_name"
success_url = "horizon:project:data_processing.clusters:index"
default_steps = (GeneralConfig, )
def handle(self, request, context):
try:
# TODO(nkonovalov) Implement AJAX Node Groups.
node_groups = None
plugin, hadoop_version = whelpers.\
get_plugin_and_hadoop_version(request)
cluster_template_id = context["general_cluster_template"] or None
user_keypair = context["general_keypair"] or None
saharaclient.cluster_create(
request,
context["general_cluster_name"],
plugin, hadoop_version,
cluster_template_id=cluster_template_id,
default_image_id=context["general_image"],
description=context["general_description"],
node_groups=node_groups,
user_keypair_id=user_keypair,
net_id=context.get("general_neutron_management_network", None))
return True
except api_base.APIException as e:
self.error_description = str(e)
return False
except Exception:
exceptions.handle(request,
_('Unable to create the cluster'))
return False
| {
"content_hash": "02b3a22aec097f06b02d5285f667f497",
"timestamp": "",
"source": "github",
"line_count": 241,
"max_line_length": 79,
"avg_line_length": 37.14107883817427,
"alnum_prop": 0.6021673556027259,
"repo_name": "blueboxgroup/horizon",
"id": "e680c945dbbacef201e4fc4fdf541cb1f01c3610",
"size": "9497",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "openstack_dashboard/dashboards/project/data_processing/clusters/workflows/create.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "121734"
},
{
"name": "HTML",
"bytes": "482545"
},
{
"name": "JavaScript",
"bytes": "509562"
},
{
"name": "Makefile",
"bytes": "588"
},
{
"name": "Python",
"bytes": "4636632"
},
{
"name": "Shell",
"bytes": "18299"
}
],
"symlink_target": ""
} |
import os
import pytest
import capnp
this_dir = os.path.dirname(__file__)
# flake8: noqa: E501
@pytest.fixture
def capability():
capnp.cleanup_global_schema_parser()
return capnp.load(os.path.join(this_dir, 'test_capability.capnp'))
class Server:
def __init__(self, val=1):
self.val = val
def foo_context(self, context):
extra = 0
if context.params.j:
extra = 1
context.results.x = str(context.params.i * 5 + extra + self.val)
def buz_context(self, context):
context.results.x = context.params.i.host + '_test'
class PipelineServer:
def getCap_context(self, context):
def _then(response):
context.results.s = response.x + '_foo'
context.results.outBox.cap = capability().TestInterface._new_server(Server(100))
return context.params.inCap.foo(i=context.params.n).then(_then)
def test_client_context(capability):
client = capability.TestInterface._new_client(Server())
req = client._request('foo')
req.i = 5
remote = req.send()
response = remote.wait()
assert response.x == '26'
req = client.foo_request()
req.i = 5
remote = req.send()
response = remote.wait()
assert response.x == '26'
with pytest.raises(AttributeError):
client.foo2_request()
req = client.foo_request()
with pytest.raises(Exception):
req.i = 'foo'
req = client.foo_request()
with pytest.raises(AttributeError):
req.baz = 1
def test_simple_client_context(capability):
client = capability.TestInterface._new_client(Server())
remote = client._send('foo', i=5)
response = remote.wait()
assert response.x == '26'
remote = client.foo(i=5)
response = remote.wait()
assert response.x == '26'
remote = client.foo(i=5, j=True)
response = remote.wait()
assert response.x == '27'
remote = client.foo(5)
response = remote.wait()
assert response.x == '26'
remote = client.foo(5, True)
response = remote.wait()
assert response.x == '27'
remote = client.foo(5, j=True)
response = remote.wait()
assert response.x == '27'
remote = client.buz(capability.TestSturdyRefHostId.new_message(host='localhost'))
response = remote.wait()
assert response.x == 'localhost_test'
with pytest.raises(Exception):
remote = client.foo(5, 10)
with pytest.raises(Exception):
remote = client.foo(5, True, 100)
with pytest.raises(Exception):
remote = client.foo(i='foo')
with pytest.raises(AttributeError):
remote = client.foo2(i=5)
with pytest.raises(Exception):
remote = client.foo(baz=5)
@pytest.mark.xfail
def test_pipeline_context(capability):
'''
E capnp.lib.capnp.KjException: capnp/lib/capnp.pyx:61: failed: <class 'Failed'>:Fixture "capability" called directly. Fixtures are not meant to be called directly,
E but are created automatically when test functions request them as parameters.
E See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and
E https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code.
E stack: 7f87c1ac6e40 7f87c17c3250 7f87c17be260 7f87c17c49f0 7f87c17c0f50 7f87c17c5540 7f87c17d7bf0 7f87c1acb768 7f87c1aaf185 7f87c1aaf2dc 7f87c1a6da1d 7f87c3895459 7f87c3895713 7f87c38c72eb 7f87c3901409 7f87c38b5767 7f87c38b6e7e 7f87c38fe48d 7f87c38b5767 7f87c38b6e7e 7f87c38fe48d 7f87c38b5767 7f87c38b67d2 7f87c38c71cf 7f87c38fdb77 7f87c38b5767 7f87c38b67d2 7f87c38c71cf 7f87c3901409 7f87c38b6632 7f87c38c71cf 7f87c3901409
'''
client = capability.TestPipeline._new_client(PipelineServer())
foo_client = capability.TestInterface._new_client(Server())
remote = client.getCap(n=5, inCap=foo_client)
outCap = remote.outBox.cap
pipelinePromise = outCap.foo(i=10)
response = pipelinePromise.wait()
assert response.x == '150'
response = remote.wait()
assert response.s == '26_foo'
class BadServer:
def __init__(self, val=1):
self.val = val
def foo_context(self, context):
context.results.x = str(context.params.i * 5 + self.val)
context.results.x2 = 5 # raises exception
def test_exception_client_context(capability):
client = capability.TestInterface._new_client(BadServer())
remote = client._send('foo', i=5)
with pytest.raises(capnp.KjException):
remote.wait()
class BadPipelineServer:
def getCap_context(self, context):
def _then(response):
context.results.s = response.x + '_foo'
context.results.outBox.cap = capability().TestInterface._new_server(Server(100))
def _error(error):
raise Exception('test was a success')
return context.params.inCap.foo(i=context.params.n).then(_then, _error)
def test_exception_chain_context(capability):
client = capability.TestPipeline._new_client(BadPipelineServer())
foo_client = capability.TestInterface._new_client(BadServer())
remote = client.getCap(n=5, inCap=foo_client)
try:
remote.wait()
except Exception as e:
assert 'test was a success' in str(e)
def test_pipeline_exception_context(capability):
client = capability.TestPipeline._new_client(BadPipelineServer())
foo_client = capability.TestInterface._new_client(BadServer())
remote = client.getCap(n=5, inCap=foo_client)
outCap = remote.outBox.cap
pipelinePromise = outCap.foo(i=10)
with pytest.raises(Exception):
pipelinePromise.wait()
with pytest.raises(Exception):
remote.wait()
def test_casting_context(capability):
client = capability.TestExtends._new_client(Server())
client2 = client.upcast(capability.TestInterface)
_ = client2.cast_as(capability.TestInterface)
with pytest.raises(Exception):
client.upcast(capability.TestPipeline)
class TailCallOrder:
def __init__(self):
self.count = -1
def getCallSequence_context(self, context):
self.count += 1
context.results.n = self.count
class TailCaller:
def __init__(self):
self.count = 0
def foo_context(self, context):
self.count += 1
tail = context.params.callee.foo_request(i=context.params.i, t='from TailCaller')
return context.tail_call(tail)
class TailCallee:
def __init__(self):
self.count = 0
def foo_context(self, context):
self.count += 1
results = context.results
results.i = context.params.i
results.t = context.params.t
results.c = capability().TestCallOrder._new_server(TailCallOrder())
@pytest.mark.xfail
def test_tail_call(capability):
'''
E capnp.lib.capnp.KjException: capnp/lib/capnp.pyx:75: failed: <class 'Failed'>:Fixture "capability" called directly. Fixtures are not meant to be called directly,
E but are created automatically when test functions request them as parameters.
E See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and
E https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code.
E stack: 7f87c17c5540 7f87c17c51b0 7f87c17c5540 7f87c17d7bf0 7f87c1acb768 7f87c1aaf185 7f87c1aaf2dc 7f87c1a6da1d 7f87c3895459 7f87c3895713 7f87c38c72eb 7f87c3901409 7f87c38b5767 7f87c38b6e7e 7f87c38fe48d 7f87c38b5767 7f87c38b6e7e 7f87c38fe48d 7f87c38b5767 7f87c38b67d2 7f87c38c71cf 7f87c38fdb77 7f87c38b5767 7f87c38b67d2 7f87c38c71cf 7f87c3901409 7f87c38b6632 7f87c38c71cf 7f87c3901409 7f87c38b5767 7f87c38b6e7e 7f87c388ace7
'''
callee_server = TailCallee()
caller_server = TailCaller()
callee = capability.TestTailCallee._new_client(callee_server)
caller = capability.TestTailCaller._new_client(caller_server)
promise = caller.foo(i=456, callee=callee)
dependent_call1 = promise.c.getCallSequence()
response = promise.wait()
assert response.i == 456
assert response.i == 456
dependent_call2 = response.c.getCallSequence()
dependent_call3 = response.c.getCallSequence()
result = dependent_call1.wait()
assert result.n == 0
result = dependent_call2.wait()
assert result.n == 1
result = dependent_call3.wait()
assert result.n == 2
assert callee_server.count == 1
assert caller_server.count == 1
| {
"content_hash": "7117ba1c17470761b4414118a2e2b0ee",
"timestamp": "",
"source": "github",
"line_count": 269,
"max_line_length": 430,
"avg_line_length": 31.390334572490705,
"alnum_prop": 0.6856939838938891,
"repo_name": "SymbiFlow/pycapnp",
"id": "e4d6fb6d5752cfd25be2e94b4e79e3dc4e487b89",
"size": "8444",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test/test_capability_context.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "447"
},
{
"name": "C++",
"bytes": "15822"
},
{
"name": "Cap'n Proto",
"bytes": "27443"
},
{
"name": "Python",
"bytes": "368736"
},
{
"name": "Shell",
"bytes": "268"
}
],
"symlink_target": ""
} |
from django.contrib import admin
from .models import Escuela, Colegiatura
admin.site.register(Escuela)
admin.site.register(Colegiatura)
| {
"content_hash": "c3f67ea86a4ec2dd98929c20693bb9e3",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 40,
"avg_line_length": 27.4,
"alnum_prop": 0.8321167883211679,
"repo_name": "erikiado/jp2_online",
"id": "a3084327b1117b955da7544ac4d2f2a437799785",
"size": "137",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "administracion/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "14504"
},
{
"name": "HTML",
"bytes": "146491"
},
{
"name": "JavaScript",
"bytes": "15162"
},
{
"name": "Python",
"bytes": "586358"
}
],
"symlink_target": ""
} |
'''
Created on May 30, 2015
@author: isdal
'''
import ConfigParser
import logging
import time
from discretepid import PID
from fancontroller.filters import MedianFilter
from fancontroller.fan_gpio import FanGpio
from math import ceil
import urllib
import httplib
import os
import sys
import requests
import json
STATE_OFF = 0
STATE_ON = 1
class NoaaForecast:
def __init__(self):
self._cache = None
def _ftoc(self, degrees_f):
return (float(degrees_f) - 32) * 5/9
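    # Worked example of the conversion above: _ftoc(68) == (68 - 32) * 5/9 == 20.0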
def Download(self):
url = 'http://forecast.weather.gov/MapClick.php?lat=47.6738&lon=-122.342&unit=c&lg=english&FcstType=json'
resp = requests.get(url=url)
open('noaa.json','w').write(resp.text)
self._cache = json.loads(resp.text)
def GetCurrentTemp(self):
if not self._cache:
return None
return self._ftoc(self._cache['currentobservation']['Temp'])
def GetTomorrowsHigh(self):
if not self._cache:
return None
high_pos = self._cache['time']['tempLabel'].index('High')
return float(self._cache['data']['temperature'][high_pos])
class _TempSensorReader:
def __init__(self, sensor):
self.sensor = sensor
def Read(self):
with open('/sensors/' + self.sensor, 'r') as sensor_file:
temp_raw = sensor_file.readlines()
if temp_raw[0].strip()[-3:] != 'YES':
logging.warn('Got non YES from sensor: ' + temp_raw[0])
return None
temp_pos = temp_raw[1].find('t=')
if temp_pos == -1:
logging.warn('No =t sensor read: ' + temp_raw[1])
return None
temp_c = int(temp_raw[1][temp_pos + 2:]) / 1000.0
logging.debug('sensor %s=%f C', self.sensor, temp_c)
return temp_c
class _FanController:
"""Class for controlling a physical fan """
_MIN_UPDATE_DELAY = 60
_MAX_SPEED_AT_DIFF = 4
_MIN_SPEED = 0.2
_GPIO_PORT = 17
def __init__(self, target):
self._state = STATE_OFF
self._speed = 0
self.last_update = 0
self.state_changes = 0
self.gpio = FanGpio(_FanController._GPIO_PORT)
# start off.
self.gpio.Off()
def _TurnOnFan(self):
logging.info('Turning on fan')
self.gpio.On()
def _TurnOffFan(self):
logging.info('Turning off fan')
self.gpio.Off()
def _SetActualFan(self, speed):
update_delay = self._GetTime() - self.last_update
if update_delay < _FanController._MIN_UPDATE_DELAY:
logging.warn('Fan state flapping, last update %d seconds ago', update_delay)
return
self._speed = speed
self.state_changes += 1
if self._state:
self._TurnOnFan()
else:
self._TurnOffFan()
def _GetTime(self):
return time.time()
def GetState(self):
return self._state
def UpdateState(self, new_state, speed=None):
if (self._state != new_state and
(speed is None or
             self._speed != speed)):
self._state = new_state
self._SetActualFan(speed)
class Thermostat:
WINDOW = 60
HYSTERESIS = 1
MIN_OUTSIDE_DIFF = 1
def __init__(self, target_temp,
outside_window=WINDOW,
inside_window=WINDOW,
hysteresis=HYSTERESIS,
min_outside_diff=MIN_OUTSIDE_DIFF):
self._hysteresis = hysteresis
self._min_outside_diff = min_outside_diff
self._outside_temp = MedianFilter(outside_window)
self._inside_temp = MedianFilter(inside_window)
self._target_temp = target_temp
self._fc = _FanController(target_temp)
self.p = PID(P=-1.5, I=-0.00, D=-0.0) # I=-0.03, D=-1)
self.p.setPoint(target_temp)
self.pid = 0
def RecordIndoorMeasurement(self, temperature):
self._inside_temp.add(temperature)
def RecordOutdoorMeasurement(self, temperature):
self._outside_temp.add(temperature)
def _RecomputeState(self, inside, outside, curr_state):
# Target temp depends on outside temp. If it is warm outside the target
        # is min_outside_diff higher than the outside temp.
target = max(self._target_temp, outside + self._min_outside_diff)
if curr_state == STATE_OFF:
# To prevent flapping the target temp is higher when the fan is off...
target += self._hysteresis / 2.0
else:
# ... and lower when it is on.
target -= self._hysteresis / 2.0
diff = inside - target
self.p.setPoint(target)
def bucket(v, buckets=20):
return ceil(v * buckets) / float(buckets)
self.pid = max(0, min(1, bucket(self.p.update(inside), buckets=10)))
msg = 'in_state: %d\tcomp_target %.2f\ttarget: %.2f\tin: %.2f\tout: %.2f\tdiff: %.2f\tpid:%.2f' % (
curr_state, target, self._target_temp, inside, outside, diff, self.pid)
if self.pid:
# if inside > target:
logging.info('ON:\t' + msg)
self._fc.UpdateState(STATE_ON)
return STATE_ON
else:
logging.info('OFF:\t' + msg)
self._fc.UpdateState(STATE_OFF)
return STATE_OFF
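    # A worked illustration of the hysteresis band above, assuming
    # target_temp=22.5, hysteresis=0.5 and a cool day outside: with the fan
    # OFF the effective target is 22.75, with the fan ON it is 22.25, so
    # small fluctuations around 22.5 do not toggle the fan.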
def ControlLoop(self):
outside = self._outside_temp.getMedian()
inside = self._inside_temp.getMedian()
if outside is None or inside is None:
logging.warn('Not enough measurements.')
return STATE_OFF
new_state = self._RecomputeState(inside, outside, self._fc.GetState())
return new_state
def GetStateChangeCount(self):
return self._fc.state_changes
def GetMeasurements(self):
"""Returns a dict with the current states to report.
indoor_temp, outdoor_temp, pid, target_temp
"""
return {'indoor_temp' : self._inside_temp.getMedian(),
'outdoor_temp' : self._outside_temp.getMedian(),
'pid': self.pid,
'target_temp': self.p.getPoint()}
class MetricsUploader:
def __init__(self):
pass
def Upload(self, measurements):
try:
# https://thingspeak.com/channels/43590
params = urllib.urlencode({'field1': measurements['indoor_temp'],
'field2': measurements['outdoor_temp'],
'field3': measurements['pid'],
'field4': measurements['target_temp'],
'key':'102HUIUCF7VYDM1K'})
headers = {'Content-type': 'application/x-www-form-urlencoded','Accept': 'text/plain'}
conn = httplib.HTTPConnection('api.thingspeak.com:80')
conn.request('POST', '/update', params, headers)
ts_response = conn.getresponse()
logging.debug('Thingspeak Response: %s %s', ts_response.status, ts_response.reason)
            conn.close()
except Exception as e:
logging.exception(e)
if __name__ == '__main__':
logging.basicConfig(
level=logging.INFO,
format="[%(asctime)s] %(levelname)s [%(name)s.%(funcName)s:%(lineno)d] %(message)s",
datefmt="%H:%M:%S", stream=sys.stdout)
indoor_sensor = _TempSensorReader('indoor_temp')
outdoor_sensor = _TempSensorReader('outdoor_temp')
config = ConfigParser.RawConfigParser({
'target_temp': 22.5,
'hysteresis': 0.5,
'min_outside_diff': 0.5,
'period': 5,
'report_period': 60,
})
config_file = 'config.txt'
if len(sys.argv) == 2:
config_file = sys.argv[1]
if os.path.isfile(config_file):
logging.info('reading config %s', config_file)
config.read(config_file)
else:
logging.info('config file not found, using defaults')
logging.info('current directory: %s, config file: %s', os.getcwd(), config_file)
uploader = MetricsUploader()
thermostat = Thermostat(target_temp=float(config.get('DEFAULT', 'target_temp')),
outside_window=1,
inside_window=1,
hysteresis=float(config.get('DEFAULT', 'hysteresis')),
min_outside_diff=float(config.get('DEFAULT', 'min_outside_diff')))
logging.info('Thermostat started, target: %f', thermostat._target_temp)
start_time = time.time()
last_report_time = start_time
iteration = 0
PERIOD = int(config.get('DEFAULT', 'period'))
REPORT_PERIOD = int(config.get('DEFAULT', 'report_period'))
while True:
thermostat.RecordIndoorMeasurement(indoor_sensor.Read())
thermostat.RecordOutdoorMeasurement(outdoor_sensor.Read())
thermostat.ControlLoop()
if iteration % int(REPORT_PERIOD / PERIOD) == 0:
uploader.Upload(thermostat.GetMeasurements())
iteration += 1
next_iteration_start = start_time + iteration * PERIOD
sleep_s = next_iteration_start - time.time()
logging.debug('sleeping %.2fs', sleep_s)
if sleep_s > 0:
time.sleep(sleep_s)
else:
start_time = time.time()
iteration = 0
| {
"content_hash": "a7472d13486c359df6db9e207c8fdbd6",
"timestamp": "",
"source": "github",
"line_count": 267,
"max_line_length": 113,
"avg_line_length": 35.30337078651685,
"alnum_prop": 0.5664120517716953,
"repo_name": "isdal/raspberrypi-fan-controller",
"id": "f223aff26cd68a446eba7b336215ad94cf2d4284",
"size": "9426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fancontroller/fan_controller.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "23686"
},
{
"name": "Shell",
"bytes": "347"
}
],
"symlink_target": ""
} |