Dataset schema (one row per source file; columns marked nullable may be null):
hexsha: string, 40 chars
size: int64, 4 to 1.02M
ext: string, 8 classes
lang: string, 1 class
max_stars_repo_path: string, 4 to 209 chars
max_stars_repo_name: string, 5 to 121 chars
max_stars_repo_head_hexsha: string, 40 chars
max_stars_repo_licenses: list, 1 to 10 items
max_stars_count: int64, 1 to 191k, nullable
max_stars_repo_stars_event_min_datetime: string, 24 chars, nullable
max_stars_repo_stars_event_max_datetime: string, 24 chars, nullable
max_issues_repo_path: string, 4 to 209 chars
max_issues_repo_name: string, 5 to 121 chars
max_issues_repo_head_hexsha: string, 40 chars
max_issues_repo_licenses: list, 1 to 10 items
max_issues_count: int64, 1 to 67k, nullable
max_issues_repo_issues_event_min_datetime: string, 24 chars, nullable
max_issues_repo_issues_event_max_datetime: string, 24 chars, nullable
max_forks_repo_path: string, 4 to 209 chars
max_forks_repo_name: string, 5 to 121 chars
max_forks_repo_head_hexsha: string, 40 chars
max_forks_repo_licenses: list, 1 to 10 items
max_forks_count: int64, 1 to 105k, nullable
max_forks_repo_forks_event_min_datetime: string, 24 chars, nullable
max_forks_repo_forks_event_max_datetime: string, 24 chars, nullable
content: string, 4 to 1.02M chars
avg_line_length: float64, 1.07 to 66.1k
max_line_length: int64, 4 to 266k
alphanum_fraction: float64, 0.01 to 1
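As an illustration only (not part of the dump itself), a minimal sketch of working with rows like these in pandas, assuming they have been exported to a hypothetical Parquet file named code_rows.parquet:

import pandas as pd

# Load the hypothetical export of the rows below; the real storage format may differ.
df = pd.read_parquet("code_rows.parquet")

# Keep only the Python sources and inspect a few of the per-file statistics columns.
py_rows = df[df["ext"] == "py"]
print(py_rows[["max_stars_repo_name", "size", "avg_line_length", "alphanum_fraction"]].head())
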
hexsha: 9f514a9af593f0be206ac89710f6f10e51760c0c | size: 1,112 | ext: py | lang: Python
max_stars_repo: BeautyWorld/urls.py in leoniknik/BeautyWorldDjango @ 93c44cd7ebf283b663020f166ec7075ceeb070c8 | licenses: ["MIT"] | stars: null | star events: null / null
max_issues_repo: BeautyWorld/urls.py in leoniknik/BeautyWorldDjango @ 93c44cd7ebf283b663020f166ec7075ceeb070c8 | licenses: ["MIT"] | issues: null | issue events: null / null
max_forks_repo: BeautyWorld/urls.py in leoniknik/BeautyWorldDjango @ 93c44cd7ebf283b663020f166ec7075ceeb070c8 | licenses: ["MIT"] | forks: null | fork events: null / null
content:
from django.conf.urls import url, include
from BeautyWorld.views import api_category, sign_up, sign_in, api_salon, api_cart, api_orders, api_offers, api_choose_offer,api_create_order, getfile, login
urlpatterns = [
#url(r'^signin$', signin), # POST
#url(r'^signup$', signup), # POST
#url(r'^edit_user$', edit_user), # POST
#url(r'^add_vehicle$', add_vehicle), # POST
#url(r'^edit_vehicle$', edit_vehicle), # POST
#url(r'^get_list_of_actual_crashes$', get_list_of_actual_crashes), # GET
#url(r'^get_list_of_history_crashes$', get_list_of_history_crashes), # GET
#url(r'^get_list_of_offers$', get_list_of_offers), # GET
##url(r'^get_list_of_vehicles$', get_list_of_vehicles), # GET
url(r'^category$', api_category), # GET
url(r'^salon$', api_salon), # GET
url(r'^signup$', sign_up), # POST
url(r'^signin$', sign_in), # POST
#url(r'^cart$', api_cart), # POST
url(r'^order$', api_orders), # GET
url(r'^offer$', api_offers), # GET
url(r'^choose_offer$', api_choose_offer),# POST
url(r'^create_order$', api_create_order),# POST
]
avg_line_length: 48.347826 | max_line_length: 156 | alphanum_fraction: 0.667266

hexsha: 8ed93dbd35c78800f001b35ce84682de3169c04f | size: 1,126 | ext: py | lang: Python
max_stars_repo: examples/vedirect_mqtt.py in NickNothom/vedirect @ 9219b75d18640b2bd7e5bbe5ab1df3cb21e8d89f | licenses: ["MIT"] | stars: 59 | star events: 2016-06-14T18:03:07.000Z / 2022-03-26T10:25:33.000Z
max_issues_repo: examples/vedirect_mqtt.py in NickNothom/vedirect @ 9219b75d18640b2bd7e5bbe5ab1df3cb21e8d89f | licenses: ["MIT"] | issues: 8 | issue events: 2019-01-19T21:11:07.000Z / 2022-03-28T20:17:54.000Z
max_forks_repo: examples/vedirect_mqtt.py in NickNothom/vedirect @ 9219b75d18640b2bd7e5bbe5ab1df3cb21e8d89f | licenses: ["MIT"] | forks: 30 | fork events: 2016-05-26T14:48:34.000Z / 2022-03-26T10:20:19.000Z
content:
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import argparse, os
import paho.mqtt.client as mqtt
from vedirect import Vedirect
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Process VE.Direct protocol')
parser.add_argument('--port', help='Serial port')
parser.add_argument('--timeout', help='Serial port read timeout', type=int, default='60')
parser.add_argument('--mqttbroker', help='MQTT broker address', type=str, default='test.mosquitto.org')
parser.add_argument('--mqttbrokerport', help='MQTT broker port', type=int, default='1883')
parser.add_argument('--topicprefix', help='MQTT topic prefix', type=str, default='vedirect/')
args = parser.parse_args()
ve = Vedirect(args.port, args.timeout)
client = mqtt.Client()
client.connect(args.mqttbroker, args.mqttbrokerport, 60)
client.loop_start()
def mqtt_send_callback(packet):
for key, value in packet.items():
if key != 'SER#': # topic cannot contain MQTT wildcards
client.publish(args.topicprefix + key, value)
ve.read_data_callback(mqtt_send_callback)
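# Example invocation (added for illustration; the serial device path below is an
# assumption, while the broker, broker port and topic prefix are simply the
# argparse defaults above):
#   python3 vedirect_mqtt.py --port /dev/ttyUSB0 --timeout 60 \
#       --mqttbroker test.mosquitto.org --mqttbrokerport 1883 --topicprefix vedirect/
# Every field of each decoded VE.Direct packet (except SER#) is then published to
# <topicprefix><field name>, e.g. a field named V ends up on topic vedirect/V.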
avg_line_length: 38.827586 | max_line_length: 107 | alphanum_fraction: 0.694494

hexsha: f09a1109c79327fb8f0575413ce51f995cd6104f | size: 1,250 | ext: py | lang: Python
max_stars_repo: applications/experimental/pipelines/pipelines/nodes/__init__.py in SunYanCN/PaddleNLP @ 31deea6c989f399b4552ee711d9f7d62768d645f | licenses: ["Apache-2.0"] | stars: null | star events: null / null
max_issues_repo: applications/experimental/pipelines/pipelines/nodes/__init__.py in SunYanCN/PaddleNLP @ 31deea6c989f399b4552ee711d9f7d62768d645f | licenses: ["Apache-2.0"] | issues: null | issue events: null / null
max_forks_repo: applications/experimental/pipelines/pipelines/nodes/__init__.py in SunYanCN/PaddleNLP @ 31deea6c989f399b4552ee711d9f7d62768d645f | licenses: ["Apache-2.0"] | forks: null | fork events: null / null
content:
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pipelines.utils.import_utils import safe_import
from pipelines.nodes.base import BaseComponent
from pipelines.nodes.file_classifier import FileTypeClassifier
from pipelines.nodes.file_converter import (
BaseConverter,
DocxToTextConverter,
ImageToTextConverter,
MarkdownConverter,
PDFToTextConverter,
PDFToTextOCRConverter,
TextConverter, )
from pipelines.nodes.preprocessor import BasePreProcessor, PreProcessor
from pipelines.nodes.ranker import BaseRanker, ErnieRanker
from pipelines.nodes.reader import BaseReader, ErnieReader
from pipelines.nodes.retriever import BaseRetriever, DensePassageRetriever
avg_line_length: 40.322581 | max_line_length: 74 | alphanum_fraction: 0.8024

hexsha: ff364b21424309db83018d192f67cf61996debfa | size: 2,562 | ext: py | lang: Python
max_stars_repo: receive_email.py in iamywang/gtk_email @ 2bee07b851a830ec76603baa8f0b2460a5dc06a8 | licenses: ["MIT"] | stars: null | star events: null / null
max_issues_repo: receive_email.py in iamywang/gtk_email @ 2bee07b851a830ec76603baa8f0b2460a5dc06a8 | licenses: ["MIT"] | issues: null | issue events: null / null
max_forks_repo: receive_email.py in iamywang/gtk_email @ 2bee07b851a830ec76603baa8f0b2460a5dc06a8 | licenses: ["MIT"] | forks: null | fork events: null / null
content:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
from email.parser import Parser
from email.header import decode_header
from email.utils import parseaddr
import poplib
# Read the email address, password and POP3 server address:
email = input('Email: ')
password = input('Password: ')
pop3_server = input('POP3 server: ')
def guess_charset(msg):
charset = msg.get_charset()
if charset is None:
content_type = msg.get('Content-Type', '').lower()
pos = content_type.find('charset=')
if pos >= 0:
charset = content_type[pos + 8:].strip()
return charset
def decode_str(s):
value, charset = decode_header(s)[0]
if charset:
value = value.decode(charset)
return value
def print_info(msg, indent=0):
if indent == 0:
for header in ['From', 'To', 'Subject']:
value = msg.get(header, '')
if value:
if header == 'Subject':
value = decode_str(value)
else:
hdr, addr = parseaddr(value)
name = decode_str(hdr)
value = u'%s <%s>' % (name, addr)
print('%s%s: %s' % (' ' * indent, header, value))
if (msg.is_multipart()):
parts = msg.get_payload()
for n, part in enumerate(parts):
print('%spart %s' % (' ' * indent, n))
print('%s--------------------' % (' ' * indent))
print_info(part, indent + 1)
else:
content_type = msg.get_content_type()
if content_type == 'text/plain' or content_type == 'text/html':
content = msg.get_payload(decode=True)
charset = guess_charset(msg)
if charset:
content = content.decode(charset)
print('%sText: %s' % (' ' * indent, content + '...'))
else:
print('%sAttachment: %s' % (' ' * indent, content_type))
# Connect to the POP3 server:
server = poplib.POP3(pop3_server)
# Debug output can be switched on or off:
server.set_debuglevel(1)
# Optional: print the POP3 server's welcome message:
print(server.getwelcome().decode('utf-8'))
# Authenticate:
server.user(email)
server.pass_(password)
# stat() returns the number of messages and the mailbox size:
print('Messages: %s. Size: %s' % server.stat())
# list() returns the numbers of all messages:
resp, mails, octets = server.list()
# The returned list looks like [b'1 82923', b'2 2184', ...]
print(mails)
# Fetch the most recent message; note that indices start at 1:
index = len(mails)
resp, lines, octets = server.retr(index)
# lines holds each line of the raw message text;
# join them to get the full raw text of the message:
msg_content = b'\r\n'.join(lines).decode('utf-8')
# Parse the raw text into a Message object:
msg = Parser().parsestr(msg_content)
print_info(msg)
# A message can be deleted from the server directly by its index:
# server.dele(index)
# Close the connection:
server.quit()
avg_line_length: 28.466667 | max_line_length: 71 | alphanum_fraction: 0.578845

hexsha: 2609197ee0f113173c7e6231d8f22b0277068c6a | size: 9,366 | ext: py | lang: Python
max_stars_repo: server/models/shape/__init__.py in jirsat/PlanarAlly @ 8c3ed434f3a1d83aa89216b3daded916096f8acd | licenses: ["MIT"] | stars: null | star events: null / null
max_issues_repo: server/models/shape/__init__.py in jirsat/PlanarAlly @ 8c3ed434f3a1d83aa89216b3daded916096f8acd | licenses: ["MIT"] | issues: null | issue events: null / null
max_forks_repo: server/models/shape/__init__.py in jirsat/PlanarAlly @ 8c3ed434f3a1d83aa89216b3daded916096f8acd | licenses: ["MIT"] | forks: null | fork events: null / null
content:
import json
from peewee import BooleanField, FloatField, ForeignKeyField, IntegerField, TextField
from playhouse.shortcuts import model_to_dict, update_model_from_dict
from typing import Any, Dict, List, Tuple
from utils import logger
from ..asset import Asset
from ..base import BaseModel
from ..campaign import Layer
from ..groups import Group
from ..label import Label
from ..user import User
__all__ = [
"AssetRect",
"Aura",
"Circle",
"CircularToken",
"Line",
"Polygon",
"Rect",
"Shape",
"ShapeLabel",
"ShapeOwner",
"Text",
"Tracker",
]
class Shape(BaseModel):
uuid = TextField(primary_key=True)
layer = ForeignKeyField(Layer, backref="shapes", on_delete="CASCADE")
type_ = TextField()
x = FloatField()
y = FloatField()
name = TextField(null=True)
name_visible = BooleanField(default=True)
fill_colour = TextField(default="#000")
stroke_colour = TextField(default="#fff")
vision_obstruction = BooleanField(default=False)
movement_obstruction = BooleanField(default=False)
is_token = BooleanField(default=False)
annotation = TextField(default="")
draw_operator = TextField(default="source-over")
index = IntegerField()
options = TextField(null=True)
badge = IntegerField(default=1)
show_badge = BooleanField(default=False)
default_edit_access = BooleanField(default=False)
default_vision_access = BooleanField(default=False)
is_invisible = BooleanField(default=False)
is_defeated = BooleanField(default=False)
default_movement_access = BooleanField(default=False)
is_locked = BooleanField(default=False)
angle = FloatField(default=0)
stroke_width = IntegerField(default=2)
asset = ForeignKeyField(Asset, backref="shapes", null=True, default=None)
group = ForeignKeyField(Group, backref="members", null=True, default=None)
annotation_visible = BooleanField(default=False)
ignore_zoom_size = BooleanField(default=False)
def __repr__(self):
return f"<Shape {self.get_path()}>"
def get_path(self):
try:
return f"{self.name}@{self.layer.get_path()}"
except:
return self.name
def get_options(self) -> Dict[str, Any]:
return dict(json.loads(self.options))
def set_options(self, options: Dict[str, Any]) -> None:
self.options = json.dumps([[k, v] for k, v in options.items()])
# todo: Change this API to accept a PlayerRoom instead
def as_dict(self, user: User, dm: bool):
data = model_to_dict(self, recurse=False, exclude=[Shape.layer, Shape.index])
# Owner query > list of usernames
data["owners"] = [owner.as_dict() for owner in self.owners]
# Layer query > layer name
data["layer"] = self.layer.name
data["floor"] = self.layer.floor.name
# Aura and Tracker queries > json
owned = (
dm
or self.default_edit_access
or self.default_vision_access
or any(user.name == o["user"] for o in data["owners"])
)
tracker_query = self.trackers
aura_query = self.auras
label_query = self.labels.join(Label)
if not owned:
if not self.annotation_visible:
data["annotation"] = ""
tracker_query = tracker_query.where(Tracker.visible)
aura_query = aura_query.where(Aura.visible)
label_query = label_query.where(Label.visible)
if not self.name_visible:
data["name"] = "?"
data["trackers"] = [t.as_dict() for t in tracker_query]
data["auras"] = [a.as_dict() for a in aura_query]
data["labels"] = [l.as_dict() for l in label_query]
# Subtype
data.update(**self.subtype.as_dict(exclude=[self.subtype.__class__.shape]))
return data
def center_at(self, x: int, y: int) -> None:
x_off, y_off = self.subtype.get_center_offset(x, y)
self.x = x - x_off
self.y = y - y_off
@property
def subtype(self):
return getattr(self, f"{self.type_}_set").get()
class ShapeLabel(BaseModel):
shape = ForeignKeyField(Shape, backref="labels", on_delete="CASCADE")
label = ForeignKeyField(Label, backref="shapes", on_delete="CASCADE")
def as_dict(self):
return self.label.as_dict()
class Tracker(BaseModel):
uuid = TextField(primary_key=True)
shape = ForeignKeyField(Shape, backref="trackers", on_delete="CASCADE")
visible = BooleanField()
name = TextField()
value = IntegerField()
maxvalue = IntegerField()
draw = BooleanField()
primary_color = TextField()
secondary_color = TextField()
def __repr__(self):
return f"<Tracker {self.name} {self.shape.get_path()}>"
def as_dict(self):
return model_to_dict(self, recurse=False, exclude=[Tracker.shape])
class Aura(BaseModel):
uuid = TextField(primary_key=True)
shape = ForeignKeyField(Shape, backref="auras", on_delete="CASCADE")
vision_source = BooleanField()
visible = BooleanField()
name = TextField()
value = IntegerField()
dim = IntegerField()
colour = TextField()
active = BooleanField()
border_colour = TextField()
angle = IntegerField()
direction = IntegerField()
def __repr__(self):
return f"<Aura {self.name} {self.shape.get_path()}>"
def as_dict(self):
return model_to_dict(self, recurse=False, exclude=[Aura.shape])
class ShapeOwner(BaseModel):
shape = ForeignKeyField(Shape, backref="owners", on_delete="CASCADE")
user = ForeignKeyField(User, backref="shapes", on_delete="CASCADE")
edit_access = BooleanField()
vision_access = BooleanField()
movement_access = BooleanField()
def __repr__(self):
return f"<ShapeOwner {self.user.name} {self.shape.get_path()}>"
def as_dict(self):
return {
"shape": self.shape.uuid,
"user": self.user.name,
"edit_access": self.edit_access,
"movement_access": self.movement_access,
"vision_access": self.vision_access,
}
class ShapeType(BaseModel):
shape = ForeignKeyField(Shape, primary_key=True, on_delete="CASCADE")
@staticmethod
def pre_create(**kwargs):
return kwargs
@staticmethod
def post_create(subshape, **kwargs):
"""
Used for special shapes that need extra behaviour after being created.
"""
pass
def as_dict(self, *args, **kwargs):
return model_to_dict(self, *args, **kwargs)
def update_from_dict(self, data, *args, **kwargs):
return update_model_from_dict(self, data, *args, **kwargs)
def get_center_offset(self, x: int, y: int) -> Tuple[int, int]:
return 0, 0
def set_location(self, points: List[List[int]]) -> None:
logger.error("Attempt to set location on shape without location info")
class BaseRect(ShapeType):
width = FloatField()
height = FloatField()
def get_center_offset(self, x: int, y: int) -> Tuple[int, int]:
return self.width / 2, self.height / 2
class AssetRect(BaseRect):
src = TextField()
class Circle(ShapeType):
radius = FloatField()
viewing_angle = FloatField(null=True)
class CircularToken(Circle):
text = TextField()
font = TextField()
class Line(ShapeType):
x2 = FloatField()
y2 = FloatField()
line_width = IntegerField()
def get_center_offset(self, x: int, y: int) -> Tuple[int, int]:
return (self.x2 - self.x) / 2, (self.y2 - self.y) / 2
class Polygon(ShapeType):
vertices = TextField()
line_width = IntegerField()
open_polygon = BooleanField()
@staticmethod
def pre_create(**kwargs):
kwargs["vertices"] = json.dumps(kwargs["vertices"])
return kwargs
def as_dict(self, *args, **kwargs):
model = model_to_dict(self, *args, **kwargs)
model["vertices"] = json.loads(model["vertices"])
return model
def update_from_dict(self, data, *args, **kwargs):
data["vertices"] = json.dumps(data["vertices"])
return update_model_from_dict(self, data, *args, **kwargs)
def set_location(self, points: List[List[int]]) -> None:
self.vertices = json.dumps(points)
self.save()
class Rect(BaseRect):
pass
class Text(ShapeType):
text = TextField()
font_size = IntegerField()
class ToggleComposite(ShapeType):
"""
Toggle shapes are composites that have multiple variants but only show one at a time.
"""
active_variant = TextField(null=True)
@staticmethod
def post_create(subshape, **kwargs):
for variant in kwargs.get("variants", []):
CompositeShapeAssociation.create(
parent=subshape, variant=variant["uuid"], name=variant["name"]
)
def as_dict(self, *args, **kwargs):
model = model_to_dict(self, *args, **kwargs)
model["variants"] = [
{"uuid": sv.variant.uuid, "name": sv.name}
for sv in self.shape.shape_variants
]
return model
class CompositeShapeAssociation(BaseModel):
variant = ForeignKeyField(Shape, backref="composite_parent", on_delete="CASCADE")
parent = ForeignKeyField(Shape, backref="shape_variants", on_delete="CASCADE")
name = TextField()
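# Illustration (added; not part of the original module): Shape.set_options stores
# the options dict as a JSON list of [key, value] pairs, while Shape.get_options
# rebuilds a dict via dict(json.loads(...)). The round trip looks like this:
#
#     >>> json.dumps([[k, v] for k, v in {"fill": "#000", "size": 5}.items()])
#     '[["fill", "#000"], ["size", 5]]'
#     >>> dict(json.loads('[["fill", "#000"], ["size", 5]]'))
#     {'fill': '#000', 'size': 5}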
avg_line_length: 30.508143 | max_line_length: 89 | alphanum_fraction: 0.647128

hexsha: fa31cc1e7dde45f7fe8905e46d960d04e407c152 | size: 1,006 | ext: py | lang: Python
max_stars_repo: src/gimelstudio/api/api.py in yonMaor/GimelStudio @ 7ed7db429e61e0413791ad261583c7018f888953 | licenses: ["Apache-2.0"] | stars: null | star events: null / null
max_issues_repo: src/gimelstudio/api/api.py in yonMaor/GimelStudio @ 7ed7db429e61e0413791ad261583c7018f888953 | licenses: ["Apache-2.0"] | issues: null | issue events: null / null
max_forks_repo: src/gimelstudio/api/api.py in yonMaor/GimelStudio @ 7ed7db429e61e0413791ad261583c7018f888953 | licenses: ["Apache-2.0"] | forks: null | fork events: null / null
content:
# ----------------------------------------------------------------------------
# Gimel Studio Copyright 2019-2022 by the Gimel Studio project contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
# Nothing here yet...
class Output(object):
def __init__(self, idname, datatype, label, visible=True):
self.idname = idname
self.datatype = datatype
self.label = label
self.visible = visible
avg_line_length: 40.24 | max_line_length: 78 | alphanum_fraction: 0.619284

hexsha: b0d9522651744807cd30e6d36c14a6868d46401d | size: 2,994 | ext: py | lang: Python
max_stars_repo: lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-stl/libcxx/unordered/TestDataFormatterUnordered.py in dan-zheng/llvm-project @ 6b792850da0345274758c9260fda5df5e57ab486 | licenses: ["Apache-2.0"] | stars: 765 | star events: 2015-12-03T16:44:59.000Z / 2022-03-07T12:41:10.000Z
max_issues_repo: lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-stl/libcxx/unordered/TestDataFormatterUnordered.py in dan-zheng/llvm-project @ 6b792850da0345274758c9260fda5df5e57ab486 | licenses: ["Apache-2.0"] | issues: 1,815 | issue events: 2015-12-11T23:56:05.000Z / 2020-01-10T19:28:43.000Z
max_forks_repo: lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-stl/libcxx/unordered/TestDataFormatterUnordered.py in dan-zheng/llvm-project @ 6b792850da0345274758c9260fda5df5e57ab486 | licenses: ["Apache-2.0"] | forks: 284 | fork events: 2015-12-03T16:47:25.000Z / 2022-03-12T05:39:48.000Z
content:
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
class LibcxxUnorderedDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
TestBase.setUp(self)
ns = 'ndk' if lldbplatformutil.target_is_android() else ''
self.namespace = 'std::__' + ns + '1'
@add_test_categories(["libc++"])
def test_with_run_command(self):
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_source_regexp(
self, "Set break point at this line.")
self.runCmd("run", RUN_SUCCEEDED)
# The stop reason of the thread should be breakpoint.
self.expect("thread list", STOPPED_DUE_TO_BREAKPOINT,
substrs=['stopped',
'stop reason = breakpoint'])
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
self.runCmd('type filter clear', check=False)
self.runCmd('type synth clear', check=False)
self.runCmd(
"settings set target.max-children-count 256",
check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
ns = self.namespace
self.look_for_content_and_continue(
"map", ['%s::unordered_map' %
ns, 'size=5 {', 'hello', 'world', 'this', 'is', 'me'])
self.look_for_content_and_continue(
"mmap", ['%s::unordered_multimap' % ns, 'size=6 {', 'first = 3', 'second = "this"',
'first = 2', 'second = "hello"'])
self.look_for_content_and_continue(
"iset", ['%s::unordered_set' %
ns, 'size=5 {', '\[\d\] = 5', '\[\d\] = 3', '\[\d\] = 2'])
self.look_for_content_and_continue(
"sset", ['%s::unordered_set' % ns, 'size=5 {', '\[\d\] = "is"', '\[\d\] = "world"',
'\[\d\] = "hello"'])
self.look_for_content_and_continue(
"imset", ['%s::unordered_multiset' % ns, 'size=6 {', '(\[\d\] = 3(\\n|.)+){3}',
'\[\d\] = 2', '\[\d\] = 1'])
self.look_for_content_and_continue(
"smset", ['%s::unordered_multiset' % ns, 'size=5 {', '(\[\d\] = "is"(\\n|.)+){2}',
'(\[\d\] = "world"(\\n|.)+){2}'])
def look_for_content_and_continue(self, var_name, patterns):
self.expect(("frame variable %s" % var_name), patterns=patterns)
self.expect(("frame variable %s" % var_name), patterns=patterns)
self.runCmd("continue")
avg_line_length: 36.962963 | max_line_length: 95 | alphanum_fraction: 0.560454

hexsha: ea6255e1350834ec3b2d09ac0e0b4520eaff3b17 | size: 2,177 | ext: py | lang: Python
max_stars_repo: test/SConsGnu/AcProgChecks/AcCheckProgs/sconstest-accheckprogs-example1.py in ptomulik/scons-gnu-build @ 9c46908eed50679d7aaaaf472e324c97545ac837 | licenses: ["Unlicense"] | stars: null | star events: null / null
max_issues_repo: test/SConsGnu/AcProgChecks/AcCheckProgs/sconstest-accheckprogs-example1.py in ptomulik/scons-gnu-build @ 9c46908eed50679d7aaaaf472e324c97545ac837 | licenses: ["Unlicense"] | issues: 1 | issue events: 2015-02-13T04:30:45.000Z / 2015-02-13T04:30:45.000Z
max_forks_repo: test/SConsGnu/AcProgChecks/AcCheckProgs/sconstest-accheckprogs-example1.py in ptomulik/scons-gnu-build @ 9c46908eed50679d7aaaaf472e324c97545ac837 | licenses: ["Unlicense"] | forks: null | fork events: null / null
content:
#
# Copyright (c) 2012-2014 by Pawel Tomulik
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE
__docformat__ = "restructuredText"
"""
TODO: write description
"""
import TestSCons
##############################################################################
#
##############################################################################
test = TestSCons.TestSCons()
test.dir_fixture('../../../../SConsGnu', 'site_scons/SConsGnu')
test.write('SConstruct',
"""
# SConstruct
from SConsGnu import AcProgChecks
env = Environment() # create an environment
cfg = Configure(env) # create SConf object
cfg.AddTests(AcProgChecks.Tests()) # add tets for alternative programs
curl = cfg.AcCheckProgs(['gcurl', 'curl']) # perform the check
env = cfg.Finish() # finish configuration
print "curl: %r" % curl # print returned value
""")
test.run()
test.must_contain_all_lines(test.stdout(), [
'Checking for gcurl... ',
'Checking for curl... ',
'curl: '
])
test.pass_test()
# Local Variables:
# # tab-width:4
# # indent-tabs-mode:nil
# # End:
# vim: set syntax=python expandtab tabstop=4 shiftwidth=4:
avg_line_length: 36.283333 | max_line_length: 80 | alphanum_fraction: 0.666973

hexsha: a8ede3bdf51aeb17dd862db5cf94ba84155756f5 | size: 355 | ext: py | lang: Python
max_stars_repo: spikeforest/sf_batch/__init__.py in tjd2002/spikeforest2 @ 2e393564b858b2995aa2ccccd9bd73065681b5de | licenses: ["Apache-2.0"] | stars: null | star events: null / null
max_issues_repo: spikeforest/sf_batch/__init__.py in tjd2002/spikeforest2 @ 2e393564b858b2995aa2ccccd9bd73065681b5de | licenses: ["Apache-2.0"] | issues: null | issue events: null / null
max_forks_repo: spikeforest/sf_batch/__init__.py in tjd2002/spikeforest2 @ 2e393564b858b2995aa2ccccd9bd73065681b5de | licenses: ["Apache-2.0"] | forks: null | fork events: null / null
content:
# from .sf_batch import sf_batch_prepare, sf_batch_run, sf_batch_assemble
from .sf_summarize_recording import sf_summarize_recording
from .sf_sort_recording import sf_sort_recording
# from .sf_batch2 import clear_job_results, download_recordings, run_jobs, assemble_job_results
from .compute_units_info import compute_units_info, select_units_on_channels
avg_line_length: 59.166667 | max_line_length: 95 | alphanum_fraction: 0.884507

hexsha: a2d519139b574801e88a1d639d68724c35b5e514 | size: 3,221 | ext: py | lang: Python
max_stars_repo: test/unit/ggrc/models/test_json_comparator.py in MikalaiMikalalai/ggrc-core @ f0f83b3638574bb64de474f3b70ed27436ca812a | licenses: ["ECL-2.0", "Apache-2.0"] | stars: 1 | star events: 2019-01-12T23:46:00.000Z / 2019-01-12T23:46:00.000Z
max_issues_repo: test/unit/ggrc/models/test_json_comparator.py in MikalaiMikalalai/ggrc-core @ f0f83b3638574bb64de474f3b70ed27436ca812a | licenses: ["ECL-2.0", "Apache-2.0"] | issues: null | issue events: null / null
max_forks_repo: test/unit/ggrc/models/test_json_comparator.py in MikalaiMikalalai/ggrc-core @ f0f83b3638574bb64de474f3b70ed27436ca812a | licenses: ["ECL-2.0", "Apache-2.0"] | forks: null | fork events: null / null
content:
# Copyright (C) 2020 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
"""Test module for with_custom_restrictions mixin"""
import unittest
from datetime import datetime, date
import ddt
from ggrc.utils import json_comparator
@ddt.ddt
class TestJsonComparator(unittest.TestCase):
"""Test class for test_custom_restrictions"""
@ddt.data(
(datetime(2019, 10, 24), '2019-10-24T00:00:00'),
(date(2019, 10, 24), '2019-10-24')
)
@ddt.unpack
def test_convert_to_string(self, obj, exp_str):
"""Test convert_to_string method"""
res_str = json_comparator.convert_to_string(obj)
self.assertEqual(res_str, exp_str)
@ddt.data(
(
[],
[],
True,
),
(
[1, 2, 3],
[1, 2],
False,
),
(
[{'1': 1, '5': 5}, {'1': 1, '2': 2}],
[{'1': 1, '5': 5}, {'1': 1, '2': 2}],
True,
),
(
[{'id': 123, 'type': 'assessment'}],
[{'id': 123, 'type': 'assessment'}],
True,
),
(
[{'id': 123, 'type': 'assessment', 'attr1': 1}],
[{'id': 123, 'type': 'assessment', 'attr2': 2, 'attr3': 3}],
True,
),
(
[{'id': 123, 'type': 'assessment'}],
[{'id': 765, 'type': 'assessment'}],
False,
),
(
[{'id': 123, 'type': 'assessment'}],
[{'id': 123, 'type': 'issue'}],
False,
),
)
@ddt.unpack
def test_lists_equal(self, list1, list2, exp_result):
"""Test lists_equal method"""
result = json_comparator.lists_equal(list1, list2)
self.assertEqual(result, exp_result)
@ddt.data(
(
{},
{},
True,
),
(
{'1': 1, '2': 2},
{'1': 1, '2': 2},
True,
),
(
{'1': 1, '2': 2},
{'1': 1, '2': 2, '3': 3},
True,
),
(
{'1': 1, '2': 2, '3': 3},
{'1': 1, '2': 2},
True,
),
(
{'1': 1, '2': 2, '3': 5},
{'1': 1, '2': 2, '3': 3},
False,
),
(
{'1': 1, '2': 2, '_3': 5},
{'1': 1, '2': 2, '_3': 3},
True,
),
)
@ddt.unpack
def test_dicts_equal(self, dict1, dict2, exp_result):
"""Test dicts_equal method"""
result = json_comparator.dicts_equal(dict1, dict2)
self.assertEqual(result, exp_result)
@ddt.data(
(
"",
"",
True,
),
(
{'1': 1, '2': 2},
{'1': 1, '2': 2},
True,
),
(
[1, 2, 3],
[1, 2, 3],
True,
),
(
[1, 2, 5],
[1, 2, 3],
False,
),
(
datetime(2019, 10, 24),
datetime(2019, 10, 24),
True,
),
(
datetime(2019, 10, 24),
date(2019, 10, 24),
False,
),
)
@ddt.unpack
def test_fields_equal(self, obj_field, src_field, exp_result):
"""Test dicts_equal method"""
result = json_comparator.fields_equal(obj_field, src_field)
self.assertEqual(result, exp_result)
avg_line_length: 21.61745 | max_line_length: 78 | alphanum_fraction: 0.427197

hexsha: a3240832a0826cb0c82dbfc510e7a41c68c18900 | size: 8,803 | ext: py | lang: Python
max_stars_repo: test/tool_shed/functional/test_0120_simple_repository_dependency_multiple_owners.py in bopopescu/phyG @ 023f505b705ab953f502cbc55e90612047867583 | licenses: ["CC-BY-3.0"] | stars: 84 | star events: 2017-10-25T15:49:21.000Z / 2021-11-28T21:25:54.000Z
max_issues_repo: data/test/python/a3240832a0826cb0c82dbfc510e7a41c68c18900test_0120_simple_repository_dependency_multiple_owners.py in vassalos/deep-learning-lang-detection @ cbb00b3e81bed3a64553f9c6aa6138b2511e544e | licenses: ["MIT"] | issues: 5 | issue events: 2018-03-29T11:50:46.000Z / 2021-04-26T13:33:18.000Z
max_forks_repo: data/test/python/a3240832a0826cb0c82dbfc510e7a41c68c18900test_0120_simple_repository_dependency_multiple_owners.py in vassalos/deep-learning-lang-detection @ cbb00b3e81bed3a64553f9c6aa6138b2511e544e | licenses: ["MIT"] | forks: 24 | fork events: 2017-11-22T08:31:00.000Z / 2022-03-27T01:22:31.000Z
content:
from tool_shed.base.twilltestcase import ShedTwillTestCase, common, os
import tool_shed.base.test_db_util as test_db_util
datatypes_repository_name = 'blast_datatypes_0120'
datatypes_repository_description = 'Galaxy applicable datatypes for BLAST'
datatypes_repository_long_description = 'Galaxy datatypes for the BLAST top hit descriptons tool'
tool_repository_name = 'blastxml_to_top_descr_0120'
tool_repository_description = 'BLAST top hit descriptions'
tool_repository_long_description = 'Make a table from BLAST XML'
'''
Tool shed side:
1) Create and populate blast_datatypes_0120.
1a) Check for appropriate strings.
2) Create and populate blastxml_to_top_descr_0120.
2a) Check for appropriate strings.
3) Upload repository_dependencies.xml to blastxml_to_top_descr_0120 that defines a relationship to blast_datatypes_0120.
3a) Check for appropriate strings.
'''
base_datatypes_count = 0
repository_datatypes_count = 0
class TestRepositoryMultipleOwners( ShedTwillTestCase ):
def test_0000_initiate_users( self ):
"""Create necessary user accounts and login as an admin user."""
"""
Create all the user accounts that are needed for this test script to run independently of other tests.
Previously created accounts will not be re-created.
"""
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
test_user_1 = test_db_util.get_user( common.test_user_1_email )
assert test_user_1 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_1_email
test_user_1_private_role = test_db_util.get_private_role( test_user_1 )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
test_user_2 = test_db_util.get_user( common.test_user_1_email )
assert test_user_2 is not None, 'Problem retrieving user with email %s from the database' % common.test_user_2_email
test_user_2_private_role = test_db_util.get_private_role( test_user_2 )
self.logout()
self.login( email=common.admin_email, username=common.admin_username )
admin_user = test_db_util.get_user( common.admin_email )
assert admin_user is not None, 'Problem retrieving user with email %s from the database' % common.admin_email
admin_user_private_role = test_db_util.get_private_role( admin_user )
def test_0005_create_datatypes_repository( self ):
"""Create and populate the blast_datatypes_0120 repository"""
"""
We are at step 1.
Create and populate blast_datatypes.
"""
category = self.create_category( name='Test 0120', description='Description of test 0120' )
self.logout()
self.login( email=common.test_user_2_email, username=common.test_user_2_name )
strings_displayed = [ 'Repository %s' % "'%s'" % datatypes_repository_name,
'Repository %s has been created' % "'%s'" % datatypes_repository_name ]
repository = self.get_or_create_repository( name=datatypes_repository_name,
description=datatypes_repository_description,
long_description=datatypes_repository_long_description,
owner=common.test_user_2_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=strings_displayed )
self.upload_file( repository,
filename='blast/blast_datatypes.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded blast_datatypes tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0010_verify_datatypes_repository( self ):
'''Verify the blast_datatypes_0120 repository.'''
'''
We are at step 1a.
Check for appropriate strings, most importantly BlastXml, BlastNucDb, and BlastProtDb,
the datatypes that are defined in datatypes_conf.xml.
'''
global repository_datatypes_count
repository = test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_2_name )
strings_displayed = [ 'BlastXml', 'BlastNucDb', 'BlastProtDb', 'application/xml', 'text/html', 'blastxml', 'blastdbn', 'blastdbp']
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
repository_datatypes_count = int( self.get_repository_datatypes_count( repository ) )
def test_0015_create_tool_repository( self ):
"""Create and populate the blastxml_to_top_descr_0120 repository"""
"""
We are at step 2.
Create and populate blastxml_to_top_descr_0120.
"""
category = self.create_category( name='Test 0120', description='Description of test 0120' )
self.logout()
self.login( email=common.test_user_1_email, username=common.test_user_1_name )
strings_displayed = [ 'Repository %s' % "'%s'" % tool_repository_name,
'Repository %s has been created' % "'%s'" % tool_repository_name ]
repository = self.get_or_create_repository( name=tool_repository_name,
description=tool_repository_description,
long_description=tool_repository_long_description,
owner=common.test_user_1_name,
category_id=self.security.encode_id( category.id ),
strings_displayed=strings_displayed )
self.upload_file( repository,
filename='blast/blastxml_to_top_descr.tar',
filepath=None,
valid_tools_only=True,
uncompress_file=True,
remove_repo_files_not_in_tar=False,
commit_message='Uploaded blastxml_to_top_descr tarball.',
strings_displayed=[],
strings_not_displayed=[] )
def test_0020_verify_tool_repository( self ):
'''Verify the blastxml_to_top_descr_0120 repository.'''
'''
We are at step 2a.
Check for appropriate strings, such as tool name, description, and version.
'''
repository = test_db_util.get_repository_by_name_and_owner( tool_repository_name, common.test_user_1_name )
strings_displayed = [ 'blastxml_to_top_descr_0120', 'BLAST top hit descriptions', 'Make a table from BLAST XML' ]
strings_displayed.extend( [ '0.0.1', 'Valid tools'] )
self.display_manage_repository_page( repository, strings_displayed=strings_displayed )
def test_0025_create_repository_dependency( self ):
'''Create a repository dependency on blast_datatypes_0120.'''
'''
We are at step 3.
Create a simple repository dependency for blastxml_to_top_descr_0120 that defines a dependency on blast_datatypes_0120.
'''
datatypes_repository = test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_2_name )
tool_repository = test_db_util.get_repository_by_name_and_owner( tool_repository_name, common.test_user_1_name )
dependency_xml_path = self.generate_temp_path( 'test_0120', additional_paths=[ 'dependencies' ] )
datatypes_tuple = ( self.url, datatypes_repository.name, datatypes_repository.user.username, self.get_repository_tip( datatypes_repository ) )
self.create_repository_dependency( repository=tool_repository, repository_tuples=[ datatypes_tuple ], filepath=dependency_xml_path )
def test_0040_verify_repository_dependency( self ):
'''Verify the created repository dependency.'''
'''
We are at step 3a.
Check the newly created repository dependency to ensure that it was defined and displays correctly.
'''
datatypes_repository = test_db_util.get_repository_by_name_and_owner( datatypes_repository_name, common.test_user_2_name )
tool_repository = test_db_util.get_repository_by_name_and_owner( tool_repository_name, common.test_user_1_name )
self.check_repository_dependency( tool_repository, datatypes_repository )
avg_line_length: 58.686667 | max_line_length: 150 | alphanum_fraction: 0.667841

hexsha: 73472552390074553cb5b142681d0af102a1843d | size: 1,213 | ext: bzl | lang: Python
max_stars_repo: for_workspace/repositories.bzl in ktf/rules_foreign_cc @ fe335ece190e5971432fb806cdb459047e577a42 | licenses: ["Apache-2.0"] | stars: null | star events: null / null
max_issues_repo: for_workspace/repositories.bzl in ktf/rules_foreign_cc @ fe335ece190e5971432fb806cdb459047e577a42 | licenses: ["Apache-2.0"] | issues: null | issue events: null / null
max_forks_repo: for_workspace/repositories.bzl in ktf/rules_foreign_cc @ fe335ece190e5971432fb806cdb459047e577a42 | licenses: ["Apache-2.0"] | forks: null | fork events: null / null
content:
""" Remote repositories, used by this project itself """
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
def repositories():
_all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
http_archive(
name = "bazel_skylib",
sha256 = "97e70364e9249702246c0e9444bccdc4b847bed1eb03c5a3ece4f83dfe6abc44",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
"https://github.com/bazelbuild/bazel-skylib/releases/download/1.0.2/bazel-skylib-1.0.2.tar.gz",
],
)
http_archive(
name = "ninja_build",
build_file_content = _all_content,
sha256 = "86b8700c3d0880c2b44c2ff67ce42774aaf8c28cbf57725cb881569288c1c6f4",
strip_prefix = "ninja-1.8.2",
urls = [
"https://github.com/ninja-build/ninja/archive/v1.8.2.tar.gz",
],
)
http_archive(
name = "cmake",
build_file_content = _all_content,
strip_prefix = "CMake-3.12.1",
urls = [
"https://github.com/Kitware/CMake/archive/v3.12.1.tar.gz",
],
)
avg_line_length: 34.657143 | max_line_length: 126 | alphanum_fraction: 0.618302

hexsha: e6d71d8cc2c4365ede785ddd146a1944b28da486 | size: 4,441 | ext: py | lang: Python
max_stars_repo: calf/lexer.py in arrdem/calf @ c8e83157c60eb9a14e2bdf39e14cec8bf3a827ae | licenses: ["MIT"] | stars: null | star events: null / null
max_issues_repo: calf/lexer.py in arrdem/calf @ c8e83157c60eb9a14e2bdf39e14cec8bf3a827ae | licenses: ["MIT"] | issues: null | issue events: null / null
max_forks_repo: calf/lexer.py in arrdem/calf @ c8e83157c60eb9a14e2bdf39e14cec8bf3a827ae | licenses: ["MIT"] | forks: null | fork events: null / null
content:
"""
Calf lexer.
Provides machinery for lexing sources of text into sequences of tokens with textual information, as
well as buffer position information appropriate for either full AST parsing, lossless syntax tree
parsing, linting or other use.
"""
import io
import re
import sys
from calf.token import CalfToken
from calf.io.reader import PeekPosReader
from calf.grammar import TOKENS
from calf.util import *
class CalfLexer:
"""
Lexer object.
Wraps something you can read characters from, and presents a lazy sequence of Token objects.
Raises ValueError at any time due to either a conflict in the grammar being lexed, or incomplete
input. Exceptions from the backing reader object are not masked.
Rule order is used to decide conflicts. If multiple patterns would match an input, the "first"
in token list order wins.
"""
def __init__(self, stream, source=None, metadata=None, tokens=TOKENS):
"""FIXME"""
self._stream = (
PeekPosReader(stream) if not isinstance(stream, PeekPosReader) else stream
)
self.source = source
self.metadata = metadata or {}
self.tokens = tokens
def __next__(self):
"""
Tries to scan the next token off of the backing stream.
Starting with a list of all available tokens, an empty buffer and a single new character
peeked from the backing stream, reads more character so long as adding the next character
still leaves one or more possible matching "candidates" (token patterns).
When adding the next character from the stream would build an invalid token, a token of the
resulting single candidate type is generated.
At the end of input, if we have a single candidate remaining, a final token of that type is
generated. Otherwise we are in an incomplete input state either due to incomplete input or
a grammar conflict.
"""
buffer = ""
candidates = self.tokens
position, chr = self._stream.peek()
while chr:
if not candidates:
raise ValueError("Entered invalid state - no candidates!")
buff2 = buffer + chr
can2 = [t for t in candidates if re.fullmatch(t[0], buff2)]
# Try to include the last read character to support longest-wins grammars
if not can2 and len(candidates) >= 1:
pat, type = candidates[0]
groups = re.match(re.compile(pat), buffer).groupdict()
groups.update(self.metadata)
return CalfToken(type, buffer, self.source, position, groups)
else:
# Update the buffers
buffer = buff2
candidates = can2
# consume the 'current' character for side-effects
self._stream.read()
# set chr to be the next peeked character
_, chr = self._stream.peek()
if len(candidates) >= 1:
pat, type = candidates[0]
groups = re.match(re.compile(pat), buffer).groupdict()
groups.update(self.metadata)
return CalfToken(type, buffer, self.source, position, groups)
else:
raise ValueError(
"Encountered end of buffer with incomplete token %r" % (buffer,)
)
def __iter__(self):
"""
Scans tokens out of the character stream.
May raise ValueError if there is either an issue with the grammar or the input.
Will not mask any exceptions from the backing reader.
"""
# While the character stream isn't empty
while self._stream.peek()[1] != "":
yield next(self)
def lex_file(path, metadata=None):
"""
Returns the sequence of tokens resulting from lexing all text in the named file.
"""
with open(path, "r") as f:
return list(CalfLexer(f, path, {}))
def lex_buffer(buffer, source="<Buffer>", metadata=None):
"""
Returns the lazy sequence of tokens resulting from lexing all the text in a buffer.
"""
return CalfLexer(io.StringIO(buffer), source, metadata)
def main():
"""A CURSES application for using the lexer."""
from calf.cursedrepl import curse_repl
def handle_buffer(buff, count):
return list(lex_buffer(buff, source=f"<Example {count}>"))
curse_repl(handle_buffer)
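# Illustration (added; not part of the original module): lex_buffer returns a
# lazy CalfLexer, so tokens can be consumed with a plain for-loop. The concrete
# token types depend on the TOKENS table in calf.grammar, which is not shown here:
#
#     for token in lex_buffer("(add 1 2)"):
#         print(token)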
avg_line_length: 32.416058 | max_line_length: 100 | alphanum_fraction: 0.635893

hexsha: bc85437feeb63076fa7920a32b626f36134cd617 | size: 103 | ext: py | lang: Python
max_stars_repo: my_study/mm/getpass_mima.py in zhangyage/Python-oldboy @ a95c1b465929e2be641e425fcb5e15b366800831 | licenses: ["Apache-2.0"] | stars: 1 | star events: 2020-06-04T08:44:09.000Z / 2020-06-04T08:44:09.000Z
max_issues_repo: my_study/mm/getpass_mima.py in zhangyage/Python-oldboy @ a95c1b465929e2be641e425fcb5e15b366800831 | licenses: ["Apache-2.0"] | issues: null | issue events: null / null
max_forks_repo: my_study/mm/getpass_mima.py in zhangyage/Python-oldboy @ a95c1b465929e2be641e425fcb5e15b366800831 | licenses: ["Apache-2.0"] | forks: null | fork events: null / null
content:
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import getpass
pwd = getpass.getpass("Enter password: ")
# List add/delete/modify/query
avg_line_length: 11.444444 | max_line_length: 31 | alphanum_fraction: 0.640777

hexsha: d2871b8e017ff780f82b70b3f6ea524dd2d089ca | size: 321 | ext: py | lang: Python
max_stars_repo: data_src/city_sight/to_dict.py in z1514/OpenRichpedia @ 0ded4b2c9414160b5b39914d43e42168e4d0762c | licenses: ["MIT"] | stars: 31 | star events: 2021-08-29T13:38:17.000Z / 2022-03-12T04:46:01.000Z
max_issues_repo: data_src/city_sight/to_dict.py in z1514/OpenRichpedia @ 0ded4b2c9414160b5b39914d43e42168e4d0762c | licenses: ["MIT"] | issues: null | issue events: null / null
max_forks_repo: data_src/city_sight/to_dict.py in z1514/OpenRichpedia @ 0ded4b2c9414160b5b39914d43e42168e4d0762c | licenses: ["MIT"] | forks: 8 | fork events: 2021-08-29T02:26:31.000Z / 2022-03-10T12:37:19.000Z
content:
import json
from os import write
w = open("json.txt","r",encoding="utf-8")
f = open("dict.txt","w",encoding="utf-8")
arr = json.load(w)
s = dict()
f.write('{')
for item in arr:
s[item["value"]] = item["key"]
print(s)
for key in s:
f.write('"'+str(key)+'"'+':')
f.write('"'+str(s[key])+'"'+',\n')
f.write("}")
avg_line_length: 22.928571 | max_line_length: 41 | alphanum_fraction: 0.548287

hexsha: 8ec4201f48ece0e9541b6992197c86862ecdb5be | size: 5,269 | ext: py | lang: Python
max_stars_repo: run_specific/weakEps.py in kolbt/whingdingdilly @ 4c17b594ebc583750fe7565d6414f08678ea7882 | licenses: ["BSD-3-Clause"] | stars: 4 | star events: 2017-09-04T14:36:57.000Z / 2022-03-28T23:24:58.000Z
max_issues_repo: run_specific/weakEps.py in kolbt/whingdingdilly @ 4c17b594ebc583750fe7565d6414f08678ea7882 | licenses: ["BSD-3-Clause"] | issues: null | issue events: null / null
max_forks_repo: run_specific/weakEps.py in kolbt/whingdingdilly @ 4c17b594ebc583750fe7565d6414f08678ea7882 | licenses: ["BSD-3-Clause"] | forks: null | fork events: null / null
content:
'''
# This is an 80 character line #
Purpose: run MONODISPERSE hard spheres to approximate the reentrant effect
observed for soft particles. Note that the actual activity of these particles
is much higher than is being reported (we maintain units in terms of the larger
diameter, sigma=1.0)
'''
# Initial imports
import sys
import os
import psutil
# Read in bash arguments
hoomdPath = "${hoomd_path}" # path to hoomd-blue
gsdPath = "${gsd_path}" # path to gsd
runFor = ${runfor} # simulation length (in tauLJ)
dumpPerBrownian = ${dump_freq} # how often to dump data
pe = ${pe} # activity of A particles
partNum = ${part_num} # total number of particles
intPhi = ${phi} # system area fraction
phi = float(intPhi)/100.0
seed1 = ${seed1} # seed for position
seed2 = ${seed2} # seed for bd equilibration
seed3 = ${seed3} # seed for initial orientations
seed4 = ${seed4} # seed for A activity
# Remaining imports
sys.path.append(hoomdPath)
import hoomd
from hoomd import md
from hoomd import deprecated
import numpy as np
# Set some constants
kT = 1.0 # temperature
threeEtaPiSigma = 1.0 # drag coefficient
sigma = 1.0 # particle diameter
D_t = kT / threeEtaPiSigma # translational diffusion constant
D_r = (3.0 * D_t) / (sigma**2) # rotational diffusion constant
tauBrown = (sigma**2) / D_t # brownian time scale (invariant)
def computeVel(activity):
"Given particle activity, output intrinsic swim speed"
velocity = (activity * sigma) / (3 * (1/D_r))
return velocity
def computeActiveForce(velocity):
"Given particle activity, output repulsion well depth"
activeForce = velocity * threeEtaPiSigma
return activeForce
def computeTauLJ(epsilon):
"Given epsilon, compute lennard-jones time unit"
tauLJ = ((sigma**2) * threeEtaPiSigma) / epsilon
return tauLJ
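# Worked example (added for clarity; not part of the original script): with
# kT = 1.0, sigma = 1.0 and threeEtaPiSigma = 1.0 we get D_t = 1 and D_r = 3,
# so computeVel(pe) = pe * 1 / (3 * (1/3)) = pe, computeActiveForce(v) = v,
# and computeTauLJ(0.1) = 1 / 0.1 = 10.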
# Compute parameters from activities
if pe != 0: # A particles are NOT Brownian
v = computeVel(pe)
Fp = computeActiveForce(v)
eps = kT * 0.1
effSig = 1.
else: # A particles are Brownian
v = 0.0
Fp = 0.0
eps = kT * 0.1
effSig = 1.
tauLJ = computeTauLJ(eps) # get LJ time unit
cut = (2**(1./6.)) * effSig # the cutoff for the LJ potential
dt = 0.000001 * tauLJ # timestep size
simLength = runFor * tauBrown # how long to run (in tauBrown)
simTauLJ = simLength / tauLJ # how long to run (in tauLJ)
totTsteps = int(simLength / dt) # how many tsteps to run
numDumps = float(simLength * dumpPerBrownian) # frames in 1 tauB
dumpFreq = float(totTsteps / numDumps) # normalized dump frequency
dumpFreq = int(dumpFreq) # ensure this is an integer
print "Brownian tau in use:", tauBrown
print "Lennard-Jones tau in use:", tauLJ
print "Timestep in use:", dt
print "Epsilon in use:", eps
print "Total number of timesteps:", totTsteps
print "Total number of output frames:", numDumps
print "Dumped snapshots per 1 tauB:", dumpPerBrownian
print "Brownian run time:", simLength
print "Activity:", pe
print "Effective diameter:", effSig
# Initialize system
hoomd.context.initialize()
# We can still use phi_p as input, the radius is assumed to be 0.5
system = hoomd.deprecated.init.create_random(N = partNum,
phi_p = phi,
name = 'A',
min_dist = 0.70,
seed = seed1,
dimensions = 2)
# Assigning groups and lengths to particles
all = hoomd.group.all()
N = len(all)
# Define potential between pairs
nl = hoomd.md.nlist.cell()
lj = hoomd.md.pair.lj(r_cut=cut, nlist=nl)
lj.set_params(mode='shift')
lj.pair_coeff.set('A', 'A', epsilon=eps, sigma=effSig)
# General integration parameters
brownEquil = 100000
hoomd.md.integrate.mode_standard(dt=dt)
hoomd.md.integrate.brownian(group=all, kT=kT, seed=seed2)
hoomd.run(brownEquil)
#set the activity of each type
np.random.seed(seed3) # seed for random orientations
angle = np.random.rand(partNum) * 2 * np.pi # random particle orientation
activity = []
for i in range(0,partNum):
x = (np.cos(angle[i])) * pe
y = (np.sin(angle[i])) * pe
z = 0
tuple = (x, y, z)
activity.append(tuple)
hoomd.md.force.active(group=all,
seed=seed4,
f_lst=activity,
rotation_diff=D_r,
orientation_link=False,
orientation_reverse_link=True)
# Get filenames for various file types
name = "pe" + str(pe) +\
"_ep" + str(eps) +\
"_phi" + str(intPhi)
gsdName = name + ".gsd"
hoomd.dump.gsd(gsdName,
period=dumpFreq,
group=all,
overwrite=False,
phase=-1,
dynamic=['attribute', 'property', 'momentum'])
hoomd.run(totTsteps)
avg_line_length: 34.89404 | max_line_length: 81 | alphanum_fraction: 0.595749

hexsha: 01392e82cafcdfcd4ba3cf3ecd58e6acad5702e4 | size: 323 | ext: py | lang: Python
max_stars_repo: Tests/testSpikeComm.py in paccionesawyer/CS133-HRI-RobotDogStudy @ 5ea35245419082b57c2427d63e057f8d187545c7 | licenses: ["MIT"] | stars: null | star events: null / null
max_issues_repo: Tests/testSpikeComm.py in paccionesawyer/CS133-HRI-RobotDogStudy @ 5ea35245419082b57c2427d63e057f8d187545c7 | licenses: ["MIT"] | issues: null | issue events: null / null
max_forks_repo: Tests/testSpikeComm.py in paccionesawyer/CS133-HRI-RobotDogStudy @ 5ea35245419082b57c2427d63e057f8d187545c7 | licenses: ["MIT"] | forks: null | fork events: null / null
content:
import subprocess
import time
import RPi.GPIO as GPIO
import serial
#Connect to Spike
ser = serial.Serial(
port='/dev/ttyACM0',
baudrate=115200,
parity=serial.PARITY_NONE,
stopbits=serial.STOPBITS_ONE,
bytesize=serial.EIGHTBITS,
timeout=1
)
while True:
print(ser.readline())
time.sleep(1)
avg_line_length: 17.944444 | max_line_length: 33 | alphanum_fraction: 0.708978

hexsha: dc62da9675f5be4309fafd0a12f555549cd2c34a | size: 5,640 | ext: py | lang: Python
max_stars_repo: build/pyDcon/VSPaths.py in dconnet/AgilityBook @ 4804c79079d6109294a6d377fb6ebda70bcb30a1 | licenses: ["MIT"] | stars: 1 | star events: 2020-11-23T20:33:41.000Z / 2020-11-23T20:33:41.000Z
max_issues_repo: build/pyDcon/VSPaths.py in dconnet/AgilityBook @ 4804c79079d6109294a6d377fb6ebda70bcb30a1 | licenses: ["MIT"] | issues: null | issue events: null / null
max_forks_repo: build/pyDcon/VSPaths.py in dconnet/AgilityBook @ 4804c79079d6109294a6d377fb6ebda70bcb30a1 | licenses: ["MIT"] | forks: 3 | fork events: 2020-05-04T19:42:26.000Z / 2022-03-08T09:36:54.000Z
content:
# coding=utf-8
# Above line is for python
#
# GetCompilerPaths(c)
# c: vc9, vc9x64, etc...
# returns tuple (vcDir, vcvarsall cmd, platformDir, platform)
# baseDir, baseDir+r'\VC\vcvarsall.bat target', vcNNN, x64/x86
#
# 2020-11-28 Make target names case insensitive.
# 2020-09-13 Changed Win32 target to x86
# 2019-02-28 Add vc142 support
# 2018-11-16 Add ARM support
# 2017-09-19 Rename vc15 to vc141, fix GetCompilerPaths tuple name
# 2017-04-07 Reverted after installing 15063 SDK (didn't happen in VS update)
# Fixed GetX64Target to work with vs2017.
# 2017-04-06 Added 10.0.14393.0 SDK to VS2017 env (for now).
# 2017-01-24 Added platform into return tuple.
# 2016-11-22 Added vc141 support, removed vc9, added platformDir to return tuple
# 2016-06-10 Made into library
#
from .GetVSDir import GetVSDir
import os
import sys
def GetTarget(vcBase, bIs64Bit, bIsARM):
# 64bit on 64bit
b64On64 = False
if 'PROCESSOR_ARCHITECTURE' in os.environ and os.environ['PROCESSOR_ARCHITECTURE'] == 'AMD64':
# Note: We used to check for the existence of <vcBase>\VC\bin\amd64.
# VS2017 moved that directory. Just assume that if we're compiling
# for 64bit on 64bit that the user installed that. With current VS,
# that's just done - not like older versions where it was a choice.
b64On64 = True
target = ''
if bIs64Bit and bIsARM:
if b64On64:
target = 'amd64_arm64'
else:
target = 'x86_arm64'
elif bIs64Bit and not bIsARM:
if b64On64:
target = 'amd64'
else:
target = 'x86_amd64'
elif not bIs64Bit and bIsARM:
if b64On64:
target = 'amd64_arm'
else:
target = 'x86_arm'
elif not bIs64Bit and not bIsARM:
if b64On64:
target = 'amd64_x86'
else:
target = 'x86'
return target
def GetCompilerPaths(c, verbose = True):
baseDir = ''
vcvarsall = ''
target = ''
extraargs = ''
platformDir = ''
platform = ''
comp = c.lower()
if comp == 'vc10':
baseDir = GetVSDir("10.0")
vcvarsall = baseDir + r'\VC\vcvarsall.bat'
target = GetTarget(baseDir, False, False)
platformDir = 'vc100'
platform = 'x86'
elif comp == 'vc10x64':
baseDir = GetVSDir("10.0")
vcvarsall = baseDir + r'\VC\vcvarsall.bat'
target = GetTarget(baseDir, True, False)
platformDir = 'vc100'
platform = 'x64'
elif comp == 'vc11':
baseDir = GetVSDir("11.0")
vcvarsall = baseDir + r'\VC\vcvarsall.bat'
target = GetTarget(baseDir, False, False)
platformDir = 'vc110'
platform = 'x86'
elif comp == 'vc11x64':
baseDir = GetVSDir("11.0")
vcvarsall = baseDir + r'\VC\vcvarsall.bat'
target = GetTarget(baseDir, True, False)
platformDir = 'vc110'
platform = 'x64'
elif comp == 'vc12':
baseDir = GetVSDir("12.0")
vcvarsall = baseDir + r'\VC\vcvarsall.bat'
target = GetTarget(baseDir, False, False)
platformDir = 'vc120'
platform = 'x86'
elif comp == 'vc12x64':
baseDir = GetVSDir("12.0")
vcvarsall = baseDir + r'\VC\vcvarsall.bat'
target = GetTarget(baseDir, True, False)
platformDir = 'vc120'
platform = 'x64'
elif comp == 'vc14':
baseDir = GetVSDir("14.0")
vcvarsall = baseDir + r'\VC\vcvarsall.bat'
target = GetTarget(baseDir, False, False)
platformDir = 'vc140'
platform = 'x86'
elif comp == 'vc14x64':
baseDir = GetVSDir("14.0")
vcvarsall = baseDir + r'\VC\vcvarsall.bat'
target = GetTarget(baseDir, True, False)
platformDir = 'vc140'
platform = 'x64'
elif comp == 'vc141':
#vcvarsall [arch]
#vcvarsall [arch] [version]
#vcvarsall [arch] [platform_type] [version]
# [arch]: x86 | amd64 | x86_amd64 | x86_arm | x86_arm64 | amd64_x86 | amd64_arm | amd64_arm64
# [platform_type]: {empty} | store | uwp
# [version] : full Windows 10 SDK number (e.g. 10.0.10240.0) or "8.1" to use the Windows 8.1 SDK.
baseDir = GetVSDir("15.0")
vcvarsall = baseDir + r'\VC\Auxiliary\Build\vcvarsall.bat'
target = GetTarget(baseDir, False, False)
# Can target specific SDKs
#extraargs = ' 10.0.14393.0'
platformDir = 'vc141'
platform = 'x86'
elif comp == 'vc141x64':
baseDir = GetVSDir("15.0")
vcvarsall = baseDir + r'\VC\Auxiliary\Build\vcvarsall.bat'
target = GetTarget(baseDir, True, False)
platformDir = 'vc141'
platform = 'x64'
elif comp == 'vc141arm64':
baseDir = GetVSDir("15.0")
vcvarsall = baseDir + r'\VC\Auxiliary\Build\vcvarsall.bat'
target = GetTarget(baseDir, True, True)
platformDir = 'vc141'
platform = 'ARM64'
elif comp == 'vc142':
baseDir = GetVSDir("16.0")
vcvarsall = baseDir + r'\VC\Auxiliary\Build\vcvarsall.bat'
target = GetTarget(baseDir, False, False)
platformDir = 'vc142'
platform = 'x86'
elif comp == 'vc142x64':
baseDir = GetVSDir("16.0")
vcvarsall = baseDir + r'\VC\Auxiliary\Build\vcvarsall.bat'
target = GetTarget(baseDir, True, False)
platformDir = 'vc142'
platform = 'x64'
elif comp == 'vc142arm64':
baseDir = GetVSDir("16.0")
vcvarsall = baseDir + r'\VC\Auxiliary\Build\vcvarsall.bat'
target = GetTarget(baseDir, True, True)
platformDir = 'vc142'
platform = 'ARM64'
else:
if verbose:
print('ERROR (pyDcon/VSPaths): Unknown target: ' + c)
return ('', '', '', '')
if len(baseDir) == 0:
if verbose:
print('ERROR (pyDcon/VSPaths): Unknown target: ' + c)
return ('', '', '', '')
if not os.access(baseDir, os.F_OK):
if verbose:
print('ERROR (pyDcon/VSPaths): "' + baseDir + '" does not exist')
return ('', '', '', '')
if not os.access(vcvarsall, os.F_OK):
if verbose:
print('ERROR (pyDcon/VSPaths): "' + vcvarsall + '" does not exist')
return ('', '', '', '')
return (baseDir, '"' + vcvarsall + '" ' + target + extraargs, platformDir, platform)
if __name__ == '__main__':
sys.exit(0)
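# Illustration (added; not part of the original module): GetCompilerPaths returns
# the tuple (baseDir, vcvarsall command, platformDir, platform), so a caller can
# prepend the environment setup to a build step. The solution name below is a
# placeholder:
#
#     base_dir, setenv_cmd, platform_dir, platform = GetCompilerPaths('vc142x64')
#     if base_dir:
#         build_cmd = setenv_cmd + ' && msbuild MySolution.sln /p:Platform=' + platform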
avg_line_length: 27.647059 | max_line_length: 99 | alphanum_fraction: 0.665957

hexsha: fe9a1401a1e8485c9815dcc33f961541158c4c21 | size: 10,622 | ext: py | lang: Python
max_stars_repo: metabench/models/statistics/statistics_recorder.py in ComeBertrand/metabench @ e5eaa32b94239b8fa475eda940b8086eec178cfe | licenses: ["MIT"] | stars: null | star events: null / null
max_issues_repo: metabench/models/statistics/statistics_recorder.py in ComeBertrand/metabench @ e5eaa32b94239b8fa475eda940b8086eec178cfe | licenses: ["MIT"] | issues: 15 | issue events: 2018-03-07T21:47:56.000Z / 2018-05-12T08:45:20.000Z
max_forks_repo: metabench/models/statistics/statistics_recorder.py in ComeBertrand/metabench @ e5eaa32b94239b8fa475eda940b8086eec178cfe | licenses: ["MIT"] | forks: null | fork events: null / null
content:
"""
File: statistics_recorder.py
Author: Come Bertrand
Email: [email protected]
Github: https://github.com/ComeBertrand
Description: Statistics computation tools that will be the result of the
benchmark computation.
"""
import numpy as np
class StatisticsRecorder(object):
"""Compilation of statistics on a benchmark of a metaheuristic run.
Args:
nb_run (int): Number of runs that will be made of a metaheuristic on
the same problem. Strictly positive.
problem (Problem): The problem on which the statistics will be
computed.
metaheuristic (Metaheuristic): The metaheuristic on which the
statistics will be computed.
base_size (int): Base size for the arrays that will hold the data from
the iterations of the metaheuristic. Default is 256. Strictly
positive.
Attributes:
nb_run (int): number of runs on which statistics are compiled.
problem (Problem): The problem on which the statistics will be
computed.
metaheuristic (Metaheuristic): The metaheuristic on which the
statistics will be computed.
nb_iter_per_run (np.array): Array of size 'nb_run' that holds the
            number of iterations made by the metaheuristic for each run.
nb_iter_total (int): Total number of iterations made in all the runs.
        best_values (np.array): Array of size 'nb_run' that holds the best
fitness of each run.
best_value (float): Best fitness in all the runs.
worst_value (float): Worst fitness of the best fitnesses computed
at each run.
mean_value (float): Mean best fitness recorded for each run.
std_value (float): Standard deviation on the best fitness of each
run.
best_time_iter (float): Best time (lower is better) of iteration
computation in all the runs. (in s).
worst_time_iter (float): Worst time (lower is better) of iteration
computation in all the runs. (in s).
mean_time_iter (float): Mean time taken by the iteration computation.
(in s.)
std_time_iter (float): Standard deviation of the time taken by the
iterations computation.
best_time_tot (float): Best time (lower is better) of computation of
a full run. (in s).
worst_time_tot (float): Worst time (lower is better) of computation of
a full run. (in s).
mean_time_tot (float): Mean time taken by the full run computation.
(in s).
std_time_tot (float): Standard deviation of the time taken by the
full run computation.
"""
def __init__(self, nb_run, problem, metaheuristic, base_size=256):
if nb_run <= 0:
raise ValueError("The number of runs must be strictly positive")
if base_size <= 0:
raise ValueError("The base size must be strictly positive")
self.problem = problem
self.metaheuristic = metaheuristic
self._nb_iter = np.zeros(nb_run, np.int)
self._nb_iter_tot = 0
self._nb_run = nb_run
self._current_size_value = base_size
self._current_size_time = base_size
# Values records are indexed by runs.
self._values = np.zeros((nb_run, base_size), np.float)
# Iter time records are all in the same array.
self._time = np.zeros(base_size, np.float)
self._time_tot = np.zeros(nb_run, np.float)
def record_iter_stat(self, num_run, best_solution, time_iteration):
"""Record a statistic concerning an iteration.
Args:
num_run (int): Index of the run in which the iteration took place.
best_solution (Solution): Best solution computed at the end of the
iteration. It has to be evaluated.
time_iteration (float): Time in second taken to compute the
iteration.
"""
if best_solution.fitness is None:
raise ValueError("Statistics cannot be recorded on solutions that "
"have not been evaluated.")
if self._nb_iter[num_run] >= self._current_size_value:
self._current_size_value *= 2
self._values.resize((self._nb_run, self._current_size_value))
if self._nb_iter_tot >= self._current_size_time:
self._current_size_time *= 2
self._time.resize((self._current_size_time,))
self._values[num_run][self._nb_iter[num_run]] = best_solution.fitness
self._time[self._nb_iter_tot] = time_iteration
self._nb_iter[num_run] += 1
self._nb_iter_tot += 1
def record_time_computation(self, num_run, time_computation):
"""Record the time taken by a full metaheuristic run.
Args:
num_run (int): Index of the run in which the iteration took place.
time_computation (float): Time in second taken to compute the
full run.
"""
self._time_tot[num_run] = time_computation
@property
def nb_run(self):
return self._nb_run
@property
def nb_iter_per_run(self):
return self._nb_iter
@property
def nb_iter_total(self):
return self._nb_iter_tot
def get_run_nb_iterations(self, run_index):
return self._nb_iter[run_index]
def get_run_values(self, run_index):
return self._values[run_index]
@property
def best_values(self):
return np.array([self._values[i][max_iter - 1] for i, max_iter
in enumerate(self._nb_iter) if max_iter > 0],
np.float)
@property
def best_value(self):
if len(self.best_values):
return np.amin(self.best_values)
return None
@property
def worst_value(self):
if len(self.best_values):
return np.amax(self.best_values)
return None
@property
def mean_value(self):
if len(self.best_values):
return np.mean(self.best_values)
return None
@property
def std_value(self):
if len(self.best_values):
return np.std(self.best_values)
return None
@property
def times_iter(self):
if self._nb_iter_tot:
return self._time[:self._nb_iter_tot]
return None
@property
def best_time_iter(self):
if self._nb_iter_tot:
return np.amin(self._time[:self._nb_iter_tot])
return None
@property
def worst_time_iter(self):
if self._nb_iter_tot:
return np.amax(self._time[:self._nb_iter_tot])
return None
@property
def mean_time_iter(self):
if self._nb_iter_tot:
return np.mean(self._time[:self._nb_iter_tot])
return None
@property
def std_time_iter(self):
if self._nb_iter_tot:
return np.std(self._time[:self._nb_iter_tot])
return None
@property
def time_tots(self):
if np.any(self._time_tot):
return self._time_tot
return None
@property
def best_time_tot(self):
if np.any(self._time_tot):
return np.amin(self._time_tot)
return None
@property
def worst_time_tot(self):
if np.any(self._time_tot):
return np.amax(self._time_tot)
return None
@property
def mean_time_tot(self):
if np.any(self._time_tot):
return np.mean(self._time_tot)
return None
@property
def std_time_tot(self):
if np.any(self._time_tot):
return np.std(self._time_tot)
return None
def __str__(self):
st_c = "|{0}|{1}|{2}|{3}|\n"
line = "".join(["-"]*62) + "\n"
stat_str = ""
stat_str += line
stat_str += ("|{}|\n".format("fitness".center(60)))
stat_str += line
stat_str += ("|{}|{}|{}|{}|\n".format("worst".center(14),
"mean".center(14),
"best".center(14),
"std".center(15)))
stat_str += line
stat_str += (st_c.format(str(self.worst_value).center(14),
str(self.mean_value).center(14),
str(self.best_value).center(14),
str(self.std_value).center(15)))
stat_str += line
stat_str += ("|{}|\n".format("nb_iterations".center(60)))
stat_str += line
stat_str += ("|{}|{}|{}|{}|\n".format("worst".center(14),
"mean".center(14),
"best".center(14),
"std".center(15)))
stat_str += line
stat_str += (st_c.format(str(np.amax(self.nb_iter_per_run)).center(14),
str(np.mean(self.nb_iter_per_run)).center(14),
str(np.amin(self.nb_iter_per_run)).center(14),
str(np.std(self.nb_iter_per_run)).center(15)))
stat_str += line
stat_str += ("|{}|\n".format("time_per_iteration".center(60)))
stat_str += line
stat_str += ("|{}|{}|{}|{}|\n".format("worst".center(14),
"mean".center(14),
"best".center(14),
"std".center(15)))
stat_str += line
stat_str += (st_c.format(str(self.worst_time_iter).center(14),
str(self.mean_time_iter).center(14),
str(self.best_time_iter).center(14),
str(self.std_time_iter).center(15)))
stat_str += line
stat_str += ("|{}|\n".format("time_per_run".center(60)))
stat_str += line
stat_str += ("|{}|{}|{}|{}|\n".format("worst".center(14),
"mean".center(14),
"best".center(14),
"std".center(15)))
stat_str += line
stat_str += (st_c.format(str(self.worst_time_tot).center(14),
str(self.mean_time_tot).center(14),
str(self.best_time_tot).center(14),
str(self.std_time_tot).center(15)))
stat_str += line
return stat_str
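# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Shows the intended call pattern: one record_iter_stat() per iteration and one
# record_time_computation() per run. _FakeSolution is a hypothetical stand-in; the
# recorder only needs a `.fitness` attribute on whatever object it is given.
def _demo_statistics_recorder():
    class _FakeSolution:
        def __init__(self, fitness):
            self.fitness = fitness
    recorder = StatisticsRecorder(nb_run=2, problem=None, metaheuristic=None)
    for run in range(2):
        for it in range(5):
            recorder.record_iter_stat(run, _FakeSolution(10.0 - it), time_iteration=0.01)
        recorder.record_time_computation(run, time_computation=0.05)
    print(recorder.best_value, recorder.mean_time_iter, recorder.mean_time_tot)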
| 36.754325 | 79 | 0.564489 |
634e34defdb37edcd7e9295ff3c2a46e9f84b24b
| 3,565 |
py
|
Python
|
huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/revoke_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64 |
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/revoke_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11 |
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-rds/huaweicloudsdkrds/v3/model/revoke_request_body.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24 |
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class RevokeRequestBody:
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
sensitive_list = []
openapi_types = {
'db_name': 'str',
'users': 'list[RevokeRequestBodyUsers]'
}
attribute_map = {
'db_name': 'db_name',
'users': 'users'
}
def __init__(self, db_name=None, users=None):
"""RevokeRequestBody - a model defined in huaweicloud sdk"""
self._db_name = None
self._users = None
self.discriminator = None
self.db_name = db_name
self.users = users
@property
def db_name(self):
"""Gets the db_name of this RevokeRequestBody.
        Database name.
:return: The db_name of this RevokeRequestBody.
:rtype: str
"""
return self._db_name
@db_name.setter
def db_name(self, db_name):
"""Sets the db_name of this RevokeRequestBody.
        Database name.
:param db_name: The db_name of this RevokeRequestBody.
:type: str
"""
self._db_name = db_name
@property
def users(self):
"""Gets the users of this RevokeRequestBody.
        List of users whose authorization is being revoked.
:return: The users of this RevokeRequestBody.
:rtype: list[RevokeRequestBodyUsers]
"""
return self._users
@users.setter
def users(self, users):
"""Sets the users of this RevokeRequestBody.
        List of users whose authorization is being revoked.
:param users: The users of this RevokeRequestBody.
:type: list[RevokeRequestBodyUsers]
"""
self._users = users
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
"""For `print`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RevokeRequestBody):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 25.464286 | 79 | 0.547826 |
4ca345e786ba4dce2b119db64c53eeca4cbcfb1a
| 43 |
py
|
Python
|
src/kan_alexandria/__version__.py
|
joaodath/kan_alexandria
|
b96ec9caf49a9372af715715275238fbaa10cc02
|
[
"Apache-2.0"
] | null | null | null |
src/kan_alexandria/__version__.py
|
joaodath/kan_alexandria
|
b96ec9caf49a9372af715715275238fbaa10cc02
|
[
"Apache-2.0"
] | null | null | null |
src/kan_alexandria/__version__.py
|
joaodath/kan_alexandria
|
b96ec9caf49a9372af715715275238fbaa10cc02
|
[
"Apache-2.0"
] | null | null | null |
__version__ = '0.0.6'
__release__ = 'beta'
| 14.333333 | 21 | 0.674419 |
359327ff8128820880b0d1cb017ded1bb163e996
| 812 |
py
|
Python
|
tests/unit/forms/widget_tests.py
|
ahmetdaglarbas/e-commerce
|
ff190244ccd422b4e08d7672f50709edcbb6ebba
|
[
"BSD-3-Clause"
] | 1 |
2015-07-16T15:00:25.000Z
|
2015-07-16T15:00:25.000Z
|
tests/unit/forms/widget_tests.py
|
ahmetdaglarbas/e-commerce
|
ff190244ccd422b4e08d7672f50709edcbb6ebba
|
[
"BSD-3-Clause"
] | null | null | null |
tests/unit/forms/widget_tests.py
|
ahmetdaglarbas/e-commerce
|
ff190244ccd422b4e08d7672f50709edcbb6ebba
|
[
"BSD-3-Clause"
] | null | null | null |
import nose
from oscar.forms import widgets
def compare_date_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_date_format(format), expected)
def test_datetime_to_date_format_conversion():
format_testcases = (
('%Y-%m-%d', 'yyyy-mm-dd'),
('%Y-%m-%d %H:%M', 'yyyy-mm-dd'),
)
for format, expected in format_testcases:
yield compare_date_format, format, expected
def compare_time_format(format, expected):
nose.tools.eq_(
widgets.datetime_format_to_js_time_format(format), expected)
def test_datetime_to_time_format_conversion():
format_testcases = (
('%Y-%m-%d %H:%M', 'hh:ii'),
('%H:%M', 'hh:ii'),
)
for format, expected in format_testcases:
yield compare_time_format, format, expected
| 25.375 | 68 | 0.667488 |
9927985cf14808924d914a44478132539a6a2300
| 3,872 |
py
|
Python
|
doc/examples/auto_csfle_example.py
|
blink1073/motor
|
92b4d51ecb7b3aa87d979ed83524b879ec5515e4
|
[
"Apache-2.0"
] | null | null | null |
doc/examples/auto_csfle_example.py
|
blink1073/motor
|
92b4d51ecb7b3aa87d979ed83524b879ec5515e4
|
[
"Apache-2.0"
] | null | null | null |
doc/examples/auto_csfle_example.py
|
blink1073/motor
|
92b4d51ecb7b3aa87d979ed83524b879ec5515e4
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import os
from bson import json_util
from bson.codec_options import CodecOptions
from pymongo.encryption import Algorithm
from pymongo.encryption_options import AutoEncryptionOpts
from motor.motor_asyncio import AsyncIOMotorClient, AsyncIOMotorClientEncryption
async def create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client):
client_encryption = AsyncIOMotorClientEncryption(
kms_providers,
key_vault_namespace,
key_vault_client,
# The CodecOptions class used for encrypting and decrypting.
# This should be the same CodecOptions instance you have configured
# on MotorClient, Database, or Collection. We will not be calling
# encrypt() or decrypt() in this example so we can use any
# CodecOptions.
CodecOptions(),
)
# Create a new data key and json schema for the encryptedField.
# https://dochub.mongodb.org/core/client-side-field-level-encryption-automatic-encryption-rules
data_key_id = await client_encryption.create_data_key(
"local", key_alt_names=["pymongo_encryption_example_1"]
)
schema = {
"properties": {
"encryptedField": {
"encrypt": {
"keyId": [data_key_id],
"bsonType": "string",
"algorithm": Algorithm.AEAD_AES_256_CBC_HMAC_SHA_512_Deterministic,
}
}
},
"bsonType": "object",
}
# Use CANONICAL_JSON_OPTIONS so that other drivers and tools will be
# able to parse the MongoDB extended JSON file.
json_schema_string = json_util.dumps(schema, json_options=json_util.CANONICAL_JSON_OPTIONS)
with open("jsonSchema.json", "w") as file:
file.write(json_schema_string)
async def main():
# The MongoDB namespace (db.collection) used to store the
# encrypted documents in this example.
encrypted_namespace = "test.coll"
# This must be the same master key that was used to create
# the encryption key.
local_master_key = os.urandom(96)
kms_providers = {"local": {"key": local_master_key}}
# The MongoDB namespace (db.collection) used to store
# the encryption data keys.
key_vault_namespace = "encryption.__pymongoTestKeyVault"
key_vault_db_name, key_vault_coll_name = key_vault_namespace.split(".", 1)
# The MotorClient used to access the key vault (key_vault_namespace).
key_vault_client = AsyncIOMotorClient()
key_vault = key_vault_client[key_vault_db_name][key_vault_coll_name]
# Ensure that two data keys cannot share the same keyAltName.
await key_vault.drop()
await key_vault.create_index(
"keyAltNames", unique=True, partialFilterExpression={"keyAltNames": {"$exists": True}}
)
await create_json_schema_file(kms_providers, key_vault_namespace, key_vault_client)
# Load the JSON Schema and construct the local schema_map option.
with open("jsonSchema.json", "r") as file:
json_schema_string = file.read()
json_schema = json_util.loads(json_schema_string)
schema_map = {encrypted_namespace: json_schema}
auto_encryption_opts = AutoEncryptionOpts(
kms_providers, key_vault_namespace, schema_map=schema_map
)
client = AsyncIOMotorClient(auto_encryption_opts=auto_encryption_opts)
db_name, coll_name = encrypted_namespace.split(".", 1)
coll = client[db_name][coll_name]
# Clear old data
await coll.drop()
await coll.insert_one({"encryptedField": "123456789"})
decrypted_doc = await coll.find_one()
print("Decrypted document: %s" % (decrypted_doc,))
unencrypted_coll = AsyncIOMotorClient()[db_name][coll_name]
encrypted_doc = await unencrypted_coll.find_one()
print("Encrypted document: %s" % (encrypted_doc,))
if __name__ == "__main__":
asyncio.run(main())
| 37.960784 | 99 | 0.707903 |
642756ca6c083b4e5cb45a2a0585caf4ec3beaf7
| 3,084 |
py
|
Python
|
tests/lr_schedulers/test_exponential_scheduler.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
tests/lr_schedulers/test_exponential_scheduler.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
tests/lr_schedulers/test_exponential_scheduler.py
|
HiromuHota/emmental
|
eb1e29b3406fc0ac301b2d29e06db5e6774eb9f0
|
[
"MIT"
] | null | null | null |
import logging
import shutil
import torch.nn as nn
import emmental
from emmental import Meta
from emmental.learner import EmmentalLearner
logger = logging.getLogger(__name__)
def test_exponential_scheduler(caplog):
"""Unit test of exponential scheduler"""
caplog.set_level(logging.INFO)
lr_scheduler = "exponential"
dirpath = "temp_test_scheduler"
model = nn.Linear(1, 1)
emmental_learner = EmmentalLearner()
Meta.reset()
emmental.init(dirpath)
# Test step per batch
config = {
"learner_config": {
"n_epochs": 4,
"optimizer_config": {"optimizer": "sgd", "lr": 10},
"lr_scheduler_config": {
"lr_scheduler": lr_scheduler,
"exponential_config": {"gamma": 0.1},
},
}
}
emmental.Meta.update_config(config)
emmental_learner.n_batches_per_epoch = 1
emmental_learner._set_optimizer(model)
emmental_learner._set_lr_scheduler(model)
assert emmental_learner.optimizer.param_groups[0]["lr"] == 10
emmental_learner.optimizer.step()
emmental_learner._update_lr_scheduler(model, 0, {})
assert abs(emmental_learner.optimizer.param_groups[0]["lr"] - 1) < 1e-5
emmental_learner.optimizer.step()
emmental_learner._update_lr_scheduler(model, 1, {})
assert abs(emmental_learner.optimizer.param_groups[0]["lr"] - 0.1) < 1e-5
emmental_learner.optimizer.step()
emmental_learner._update_lr_scheduler(model, 2, {})
assert abs(emmental_learner.optimizer.param_groups[0]["lr"] - 0.01) < 1e-5
emmental_learner.optimizer.step()
emmental_learner._update_lr_scheduler(model, 3, {})
assert abs(emmental_learner.optimizer.param_groups[0]["lr"] - 0.001) < 1e-5
# Test step per epoch
config = {
"learner_config": {
"n_epochs": 4,
"optimizer_config": {"optimizer": "sgd", "lr": 10},
"lr_scheduler_config": {
"lr_scheduler": lr_scheduler,
"lr_scheduler_step_unit": "epoch",
"exponential_config": {"gamma": 0.1},
},
}
}
emmental.Meta.update_config(config)
emmental_learner.n_batches_per_epoch = 2
emmental_learner._set_optimizer(model)
emmental_learner._set_lr_scheduler(model)
assert emmental_learner.optimizer.param_groups[0]["lr"] == 10
emmental_learner.optimizer.step()
emmental_learner._update_lr_scheduler(model, 0, {})
assert abs(emmental_learner.optimizer.param_groups[0]["lr"] - 10) < 1e-5
emmental_learner.optimizer.step()
emmental_learner._update_lr_scheduler(model, 1, {})
assert abs(emmental_learner.optimizer.param_groups[0]["lr"] - 1) < 1e-5
emmental_learner.optimizer.step()
emmental_learner._update_lr_scheduler(model, 2, {})
assert abs(emmental_learner.optimizer.param_groups[0]["lr"] - 1) < 1e-5
emmental_learner.optimizer.step()
emmental_learner._update_lr_scheduler(model, 3, {})
assert abs(emmental_learner.optimizer.param_groups[0]["lr"] - 0.1) < 1e-5
shutil.rmtree(dirpath)
| 32.125 | 79 | 0.672503 |
17e2747b7e13819f59653089033fed40d62e5df1
| 4,236 |
py
|
Python
|
tyrell/dsl/builder.py
|
Lukas-Dresel/Trinity
|
f8c5c8356acb8142aad626ba7c24e4daa9531089
|
[
"Apache-2.0"
] | 22 |
2019-04-04T14:01:18.000Z
|
2022-01-07T19:42:15.000Z
|
tyrell/dsl/builder.py
|
Lukas-Dresel/Trinity
|
f8c5c8356acb8142aad626ba7c24e4daa9531089
|
[
"Apache-2.0"
] | 3 |
2019-01-26T07:14:35.000Z
|
2019-03-05T16:28:40.000Z
|
tyrell/dsl/builder.py
|
Lukas-Dresel/Trinity
|
f8c5c8356acb8142aad626ba7c24e4daa9531089
|
[
"Apache-2.0"
] | 13 |
2019-03-27T18:37:19.000Z
|
2021-09-23T20:54:44.000Z
|
from typing import Union
import sexpdata
from .node import *
from ..spec import TyrellSpec, Production, EnumType
from ..visitor import GenericVisitor
class ProductionVisitor(GenericVisitor):
_children: List[Node]
def __init__(self, children: List[Node]):
self._children = children
def visit_enum_production(self, prod) -> Node:
return AtomNode(prod)
def visit_param_production(self, prod) -> Node:
return ParamNode(prod)
def visit_function_production(self, prod) -> Node:
return ApplyNode(prod, self._children)
class Builder:
    '''A factory class to build AST nodes'''
_spec: TyrellSpec
def __init__(self, spec: TyrellSpec):
self._spec = spec
def _make_node(self, prod: Production, children: List[Node] = []) -> Node:
return ProductionVisitor(children).visit(prod)
def make_node(self, src: Union[int, Production], children: List[Node] = []) -> Node:
'''
Create a node with the given production index and children.
Raise `KeyError` or `ValueError` if an error occurs
'''
if isinstance(src, int):
return self._make_node(self._spec.get_production_or_raise(src), children)
elif isinstance(src, Production):
# Sanity check first
prod = self._spec.get_production_or_raise(src.id)
if src != prod:
raise ValueError(
'DSL Builder found inconsistent production instance')
return self._make_node(prod, children)
else:
raise ValueError(
'make_node() only accepts int or production, but found {}'.format(src))
def make_enum(self, name: str, value: str) -> Node:
'''
Convenient method to create an enum node.
Raise `KeyError` or `ValueError` if an error occurs
'''
ty = self.get_type_or_raise(name)
prod = self.get_enum_production_or_raise(ty, value)
return self.make_node(prod.id)
def make_param(self, index: int) -> Node:
'''
Convenient method to create a param node.
Raise `KeyError` or `ValueError` if an error occurs
'''
prod = self.get_param_production_or_raise(index)
return self.make_node(prod.id)
def make_apply(self, name: str, args: List[Node]) -> Node:
'''
Convenient method to create an apply node.
Raise `KeyError` or `ValueError` if an error occurs
'''
prod = self.get_function_production_or_raise(name)
return self.make_node(prod.id, args)
def _from_sexp(self, sexp) -> Node:
if not isinstance(sexp, list) or len(sexp) < 2 or not isinstance(sexp[0].value(), str):
            # None of our nodes serializes to an atom
msg = 'Cannot parse sexp into dsl.Node: {}'.format(sexp)
raise ValueError(msg)
sym = sexp[0].value()
# First check for param node
if sym == '@param':
index = int(sexp[1])
return self.make_param(index)
# Next, check for atom node
ty = self.get_type(sym)
if ty is not None and ty.is_enum():
if isinstance(sexp[1], list):
                # Could be an enum list
value = [str(x) for x in sexp[1]]
return self.make_enum(ty.name, value)
else:
value = str(sexp[1])
return self.make_enum(ty.name, value)
# Finally, check for apply node
args = [self._from_sexp(x) for x in sexp[1:]]
return self.make_apply(sym, args)
def from_sexp_string(self, sexp_str: str) -> Node:
'''
Convenient method to create an AST from an sexp string.
Raise `KeyError` or `ValueError` if an error occurs
'''
try:
sexp = sexpdata.loads(sexp_str)
# This library is liberal on its exception raising...
except Exception as e:
raise ValueError('Sexp parsing error: {}'.format(e))
return self._from_sexp(sexp)
    # For convenience, expose all methods in TyrellSpec so that the client does not need to keep a reference to it
def __getattr__(self, attr):
return getattr(self._spec, attr)
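# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Assuming a TyrellSpec that defines an enum type `SmallInt` and a function production
# `plus`, an S-expression round-trips into an AST like so (spec construction omitted
# because it depends on the DSL definition):
#
#   builder = Builder(spec)
#   prog = builder.from_sexp_string('(plus (@param 0) (SmallInt "1"))')
#   # prog is an ApplyNode whose children are a ParamNode and an AtomNode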
| 35.3 | 112 | 0.609065 |
e05e114bb7117e1f576ed07e643cc281b9408dd5
| 804 |
py
|
Python
|
pyshop/pyshop/urls.py
|
Fahad-Hafeez/PyShop
|
825e55e4da9b9661f91562669c9b2599531fdc3c
|
[
"Apache-2.0"
] | null | null | null |
pyshop/pyshop/urls.py
|
Fahad-Hafeez/PyShop
|
825e55e4da9b9661f91562669c9b2599531fdc3c
|
[
"Apache-2.0"
] | null | null | null |
pyshop/pyshop/urls.py
|
Fahad-Hafeez/PyShop
|
825e55e4da9b9661f91562669c9b2599531fdc3c
|
[
"Apache-2.0"
] | null | null | null |
"""pyshop URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('products/', include('products.urls'))
]
| 36.545455 | 77 | 0.706468 |
2092626a4b4d74dcc28ae9ce862b5fd3fd636b69
| 6,206 |
py
|
Python
|
data_steward/cdr_cleaner/cleaning_rules/missing_concept_record_suppression.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 16 |
2017-06-30T20:05:05.000Z
|
2022-03-08T21:03:19.000Z
|
data_steward/cdr_cleaner/cleaning_rules/missing_concept_record_suppression.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 342 |
2017-06-23T21:37:40.000Z
|
2022-03-30T16:44:16.000Z
|
data_steward/cdr_cleaner/cleaning_rules/missing_concept_record_suppression.py
|
lrwb-aou/curation
|
e80447e56d269dc2c9c8bc79e78218d4b0dc504c
|
[
"MIT"
] | 33 |
2017-07-01T00:12:20.000Z
|
2022-01-26T18:06:53.000Z
|
"""
Remove records that contain concept_ids that do not belong in the vocabulary.
Original Issues: DC1601
"""
# Python imports
import logging
# Third party imports
from google.cloud.exceptions import GoogleCloudError
# Project imports
from cdr_cleaner.cleaning_rules.deid.concept_suppression import AbstractBqLookupTableConceptSuppression
from constants.cdr_cleaner import clean_cdr as cdr_consts
from common import JINJA_ENV, CDM_TABLES
from utils import pipeline_logging
from resources import get_concept_id_fields
LOGGER = logging.getLogger(__name__)
SUPPRESSION_RULE_CONCEPT_TABLE = 'missing_vocabulary_concepts'
CREATE_OR_REPLACE_CLAUSE = JINJA_ENV.from_string("""
CREATE OR REPLACE TABLE `{{project_id}}.{{sandbox_id}}.{{concept_suppression_lookup_table}}` AS
{{query}}
""")
MISSING_CONCEPTS_QUERY = JINJA_ENV.from_string("""
SELECT DISTINCT
t.{{concept_id_field}} concept_id
FROM `{{project_id}}.{{dataset_id}}.{{tablename}}` t
LEFT JOIN `{{project_id}}.{{dataset_id}}.concept` c
ON c.concept_id = t.{{concept_id_field}}
WHERE c.concept_id IS NULL
AND (t.{{concept_id_field}} IS NOT NULL AND t.{{concept_id_field}} <> 0)
""")
class MissingConceptRecordSuppression(AbstractBqLookupTableConceptSuppression):
def __init__(self,
project_id,
dataset_id,
sandbox_dataset_id,
table_namer=''):
"""
Initialize the class with proper information.
Set the issue numbers, description and affected datasets. As other tickets may affect
this SQL, append them to the list of Jira Issues.
DO NOT REMOVE ORIGINAL JIRA ISSUE NUMBERS!
"""
desc = "Remove records that contain concept_ids that do not exist in the vocabulary."
super().__init__(
issue_numbers=['DC1601'],
description=desc,
affected_datasets=[cdr_consts.COMBINED],
affected_tables=CDM_TABLES,
project_id=project_id,
dataset_id=dataset_id,
sandbox_dataset_id=sandbox_dataset_id,
concept_suppression_lookup_table=SUPPRESSION_RULE_CONCEPT_TABLE,
table_namer=table_namer)
def get_missing_concepts(self, client, tables):
queries = []
union_distinct = "\nUNION DISTINCT\n"
for table in tables:
concept_id_fields = get_concept_id_fields(table)
concept_id_fields = [
field for field in concept_id_fields
if 'source_concept_id' not in field
]
for concept_id_field in concept_id_fields:
query = MISSING_CONCEPTS_QUERY.render(
project_id=self.project_id,
dataset_id=self.dataset_id,
tablename=table,
concept_id_field=concept_id_field)
queries.append(query)
unioned_queries = union_distinct.join(queries)
concept_suppression_lookup_query = CREATE_OR_REPLACE_CLAUSE.render(
project_id=self.project_id,
sandbox_id=self.sandbox_dataset_id,
concept_suppression_lookup_table=self.
concept_suppression_lookup_table,
query=unioned_queries)
query_job = client.query(concept_suppression_lookup_query)
result = query_job.result()
if hasattr(result, 'errors') and result.errors:
LOGGER.error(f"Error running job {result.job_id}: {result.errors}")
raise GoogleCloudError(
f"Error running job {result.job_id}: {result.errors}")
def create_suppression_lookup_table(self, client):
"""
Build the concept suppression lookup table
:param client: Bigquery client
:return:
"""
self.get_missing_concepts(client, self.affected_tables)
def setup_validation(self, client, *args, **keyword_args):
"""
Run required steps for validation setup
Method to run to setup validation on cleaning rules that will be updating or deleting the values.
For example:
if your class updates all the datetime fields you should be implementing the
logic to get the initial list of values which adhere to a condition we are looking for.
if your class deletes a subset of rows in the tables you should be implementing
the logic to get the row counts of the tables prior to applying cleaning rule
"""
raise NotImplementedError("Please fix me.")
def validate_rule(self, client, *args, **keyword_args):
"""
Validates the cleaning rule which deletes or updates the data from the tables
Method to run validation on cleaning rules that will be updating the values.
For example:
if your class updates all the datetime fields you should be implementing the
validation that checks if the date time values that needs to be updated no
longer exists in the table.
if your class deletes a subset of rows in the tables you should be implementing
the validation that checks if the count of final final row counts + deleted rows
should equals to initial row counts of the affected tables.
Raises RunTimeError if the validation fails.
"""
raise NotImplementedError("Please fix me.")
if __name__ == '__main__':
import cdr_cleaner.args_parser as parser
import cdr_cleaner.clean_cdr_engine as clean_engine
ARGS = parser.parse_args()
pipeline_logging.configure(level=logging.DEBUG, add_console_handler=True)
if ARGS.list_queries:
clean_engine.add_console_logging()
query_list = clean_engine.get_query_list(
ARGS.project_id, ARGS.dataset_id, ARGS.sandbox_dataset_id,
[(MissingConceptRecordSuppression,)])
for query in query_list:
LOGGER.info(query)
else:
clean_engine.add_console_logging(ARGS.console_log)
clean_engine.clean_dataset(ARGS.project_id, ARGS.dataset_id,
ARGS.sandbox_dataset_id,
[(MissingConceptRecordSuppression,)])
| 37.385542 | 105 | 0.672897 |
839ac3ce9db060bad7f334a21904c8fc05075aff
| 44,949 |
py
|
Python
|
snkrfinder/model/cvae.py
|
ergonyc/snkrfinder
|
d8ddc6f20cf9c1ac2eec460f0e7bab9ab03c6791
|
[
"Apache-2.0"
] | null | null | null |
snkrfinder/model/cvae.py
|
ergonyc/snkrfinder
|
d8ddc6f20cf9c1ac2eec460f0e7bab9ab03c6791
|
[
"Apache-2.0"
] | null | null | null |
snkrfinder/model/cvae.py
|
ergonyc/snkrfinder
|
d8ddc6f20cf9c1ac2eec460f0e7bab9ab03c6791
|
[
"Apache-2.0"
] | null | null | null |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02c_model.cvae.ipynb (unless otherwise specified).
__all__ = ['prep_df_for_datablocks', 'get_ae_btfms', 'get_ae_no_aug', 'TensorPoint', 'Tensor2Vect', 'LatentsTensor',
'df_get_x', 'df_get_y', 'LatentsTensorBlock', 'df_ae_x', 'df_ae_y', 'LatentTupleBlock', 'get_ae_DataBlock',
'UpsampleBlock', 'LatentLayer', 'AEEncoder', 'AEDecoder', 'build_AE_encoder', 'build_AE_decoder', 'AE',
'AELoss', 'MyMetric', 'L1LatentReg', 'KLD', 'KLDiv', 'L2MeanMetric', 'L1MeanMetric', 'L2Metric', 'L1Metric',
'L2BMeanMetric', 'L1BMeanMetric', 'KLWeightMetric', 'RawKLDMetric', 'WeightedKLDMetric', 'MuMetric',
'MuSDMetric', 'StdMetric', 'StdSDMetric', 'LogvarMetric', 'LogvarSDMetric', 'default_AE_metrics',
'short_AE_metrics', 'AnnealedLossCallback', 'default_KL_anneal_in', 'bn_splitter', 'resnetVAE_split',
'AE_split', 'get_conv_parts', 'get_pretrained_parts', 'get_encoder_parts', 'VAELinear', 'VAELayer', 'BVAE',
'BVAELoss', 'default_VAE_metrics', 'short_VAE_metrics', 'gaussian_kernel', 'MMD', 'rawMMD', 'MMDVAE',
'MaxMeanDiscrepancy', 'MMDLoss', 'MMDMetric', 'short_MMEVAE_metrics', 'default_MMEVAE_metrics',
'UpsampleResBlock', 'get_resblockencoder_parts', 'ResBlockAEDecoder', 'build_ResBlockAE_decoder',
'ResBlockAE']
# Cell
from ..imports import *
from ..core import *
from ..data import *
from .core import *
#from snkrfinder.model.transfer import *
from fastai.test_utils import show_install, synth_learner, nvidia_smi, nvidia_mem
# Cell
def prep_df_for_datablocks(df):
df = df[["path","train","test","validate","t_t_v","Category"]].copy()
# I could remove all the "test" rows... for now i'll choose an alternate strategy:
# Drop all the "test" rows for now, and create an "is_valid" column...
# should probably drop a ton of columns to jus tkeep the file paths...
# just keep what we'll need below
df.loc[:,'is_valid'] = df.test | df.validate
df.loc[:,'og_idx'] = df.index
return df
# Cell
def get_ae_btfms(stats = 'sneaker'):
# could use globals IM_STATS['sneaker'] and IM_STATS['imagenet']
im_stats = ([.5,.5,.5],[.5,.5,.5]) if stats == 'sneaker' else imagenet_stats
batch_tfms = Normalize.from_stats(*im_stats)
#batch_tfms = Normalize.from_stats([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])
rand_tfms = aug_transforms(mult=1.0,
do_flip=True,
flip_vert=False,
max_rotate=5.0,
min_zoom=.95,
max_zoom=1.0,
max_lighting=0.1,
max_warp=0.1,
p_affine=0.66,
p_lighting=0.2,
xtra_tfms=None,
size=None,
mode='bilinear',
pad_mode='border',
align_corners=True,
batch=False,
min_scale=1.0)
return rand_tfms+[batch_tfms]
def get_ae_no_aug(stats = 'sneaker'):
im_stats = ([.5,.5,.5],[.5,.5,.5]) if stats == 'sneaker' else imagenet_stats
batch_tfms = Normalize.from_stats(*im_stats)
return [batch_tfms]
# Cell
# NO CLUE WHY WE NEED TO HAVE THIS.... copied
class TensorPoint(TensorBase):
"Basic type for points in an image"
_show_args = dict(s=10, marker='.', c='r')
@classmethod
def create(cls, t, img_size=None)->None:
"Convert an array or a list of points `t` to a `Tensor`"
return cls(tensor(t).view(-1, 2).float(), img_size=img_size)
def show(self, ctx=None, **kwargs):
if 'figsize' in kwargs: del kwargs['figsize']
x = self.view(-1,2)
ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
return ctx
class Tensor2Vect(TensorPoint): pass
# TODO: instantiate a show method
class LatentsTensor(Tensor2Vect):
"Basic type for latents as Tensor inheriting from TensorPoint (vectors)"
@classmethod
def create(cls, ts, img_size=IMG_SIZE):
"create IMG_SIZE attr to register plotting..."
if isinstance(ts,tuple):
mu,logvar = ts
elif ts is None:
mu,logvar = None,None
else:
mu = None
logvar = None
if mu is None: mu = torch.empty(0)
        elif not isinstance(mu, Tensor): mu = Tensor(mu)
if logvar is None: logvar = torch.empty(0)
        elif not isinstance(logvar, Tensor): logvar = Tensor(logvar)
t = torch.cat([mu,logvar],dim=-1) # in case its a batch?
return cls(tensor(t).view(-1, 2).float(), img_size=img_size)
# def show(self, ctx=None, **kwargs):
# if 'figsize' in kwargs: del kwargs['figsize']
# x = self.view(-1,2)
# ctx.scatter(x[:, 0], x[:, 1], **{**self._show_args, **kwargs})
# return ctx
# mu,logvar = self
# if not isinstance(mu, Tensor) or not isinstance(logvar,Tensor): return ctx
# title_str = f"mu-> {mu.mean():e}, {mu.std():e} logvar->{logvar.mean():e}, {logvar.std():e}"
# if 'figsize' in kwargs: del kwargs['figsize']
# if 'title' in kwargs: kwargs['title']=title_str
# if ctx is None:
# _,axs = plt.subplots(1,2, figsize=(12,6))
# x=torch.linspace(0,1,mu[0].shape[0])
# axs[0].scatter(x, mu[:], **{**self._show_args, **kwargs})
# axs[1].scatter(x, logvar[:], **{**self._show_args, **kwargs})
# ctx = axs[1]
# ctx.scatter(mu[:], logvar[:], **{**self._show_args, **kwargs})
# return ctx
# Cell
def df_get_x(r):
"datablock df helper for VAE Block using `LatentTuple`"
return L_ROOT/'data/raw'/r['path']
def df_get_y(r):
"datablock df helper for VAE Block using `LatentTuple`"
return (df_get_x(r),None,None)
# Cell
def LatentsTensorBlock():
"Class wrapper for the AE `LatentTensor` Block"
return TransformBlock(type_tfms=LatentsTensor.create, batch_tfms=noop)
def df_ae_x(r,im_path=L_ROOT/"data/raw"):
"Autoencoder LatentsTensorBlock datablock df helper"
return im_path/r['path']
# need to make sure that we get the image which is "Identical" to the input.. how to test?
def df_ae_y(r):
"The target is the same as the input for AE"# lambda o: o
return df_ae_x(r)
#export
# could we do a typedispatch to manage the transforms...?
# def VAETargetTupleBlock():
# return TransformBlock(type_tfms=VAETargetTuple.create, batch_tfms=IntToFloatTensor)
def LatentTupleBlock():
"Class wrapper for the AE `LatentTuple` Block (depricated)"
return TransformBlock(type_tfms=LatentTuple.create, batch_tfms=noop)
# Cell
#
def get_ae_DataBlock(aug=True,im_path=L_ROOT/"data/raw",stats = 'sneaker',im_size=IMG_SIZE):
"wrapper to get the standard AE datablock with `ImageBlock`,`LatentTensor` target"
# use partials or a class wrapper to get around this yucky hack
# global image_path
# image_path = im_path
mytfms = get_ae_btfms(stats=stats) if aug else get_ae_no_aug(stats=stats)
block = DataBlock(blocks=(ImageBlock(cls=PILImage), ImageBlock(cls=PILImage), LatentsTensorBlock ),
get_x=df_ae_x,
get_y=[df_ae_y, noop], #don't need to get the LatentsTensorBlock, just create
splitter=ColSplitter('is_valid'),
item_tfms= FeatsResize(im_size,method='pad', pad_mode='border'),
batch_tfms = mytfms,
n_inp = 1)
return block
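# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Typical wiring from metadata dataframe to dataloaders; `raw_df` and the batch size
# below are placeholders.
#
#   df = prep_df_for_datablocks(raw_df)
#   dblock = get_ae_DataBlock(aug=True)
#   dls = dblock.dataloaders(df, bs=32)
#   dls.show_batch()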
# Cell
class UpsampleBlock(Module):
def __init__(self, up_in_c:int, final_div:bool=True, blur:bool=False, **kwargs):
"""
Upsampling using PixelShuffle_INCR and ConvLayer
- up_in_c : "Upsample input channel"
"""
self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, **kwargs)
ni = up_in_c//2
nf = ni if final_div else ni//2
self.conv1 = ConvLayer(ni, nf, **kwargs) # since we'll apply it by hand...
self.conv2 = ConvLayer(nf, nf, **kwargs)
def forward(self, up_in:Tensor) -> Tensor:
up_out = self.shuf(up_in)
return self.conv2(self.conv1(up_out))
# Cell
class LatentLayer(Module):
"""
This layer encodes the latent "bottleneck" and is constructed to work with the specified VAE DataBlock be a replacement for
the variational (reparameter trick) layer for otherwise identical architecture
"""
def __init__(self,in_features,latent_features):
"""
Compose a linear latent layer such that the mechanics are equivalent to the VAE
the "dummy" can be used for a shaddow logvar track a KLD estimate divergence
from latent gaussian prior
compute the variance across batchs for each latent feature as the dummy_var
"""
self.latent = nn.Linear(in_features,latent_features)
def forward(self,h):
z = self.latent(h)
#dummy_var = (z.var(dim=1).unsqueeze(-1).expand(z.size()) ) #variance across latent dim for each image
dummy_var = (z.var(dim=0).unsqueeze(0).expand(z.size()) ) #latent variance across batch
dummy_mu = z
return z, dummy_mu, dummy_var.log()
#return z, torch.zeros_like(z)
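# --- Hedged illustration (added; not part of the original module) ---
# LatentLayer is the deterministic stand-in for the VAE bottleneck: z doubles as "mu",
# and the returned "logvar" is just the log of the per-feature batch variance of z.
# Assumes torch and fastai's Module are in scope via the star imports above.
def _demo_latent_layer():
    layer = LatentLayer(in_features=16, latent_features=4)
    z, mu, logvar = layer(torch.randn(8, 16))
    assert torch.equal(z, mu)
    assert torch.allclose(logvar, z.var(dim=0).log().expand_as(z))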
# Cell
class AEEncoder(Module):
def __init__(self,arch_body,enc_dim, hidden_dim=None, im_size=IMG_SIZE):
"""
        arch_body: list of layers (e.g. arch.children()[:cut])
        enc_dim: number of features produced by the flattened encoder body
        hidden_dim: optional number of linear features to sandwich between the feature encoder and the latent layers
"""
arch = arch_body + [Flatten()]
if hidden_dim: # i.e. is not None
arch += [nn.Linear(enc_dim,hidden_dim)]
# [LinBnDrop(enc_dim,hidden_dim,bn=True,p=0.0,act=nn.ReLU(),lin_first=True)]
self.encoder = nn.Sequential(*arch)
store_attr('enc_dim,hidden_dim')
def forward(self, x):
return self.encoder(x)
#### TODO: refactor this to take a "BLOCK" input so we can have either UpsampleBlocks or ResBlockUpsampleBlocks
class AEDecoder(Module):
def __init__(self, hidden_dim=None, latent_dim=128, im_size=IMG_SIZE,out_range=OUT_RANGE):
"""
Decoder Module made of `UpsampleBlock`s returning the latent representation back into an "image"
latent_dim - dimension of latent representation
hidden_dim - optional additional linear layer between the latent and decoder
im_size - passed to make sure we are scaling back to the right size
        out_range - ensures the output is on the same scale as the _normalized_ input image
"""
#decoder
n_blocks = 5
BASE = im_size//2**5
hidden = im_size*BASE*BASE if hidden_dim is None else hidden_dim
z_fc = [nn.Linear(latent_dim,hidden)] # [LinBnDrop(latent_dim,hidden,bn=True,p=0.0,act=nn.ReLU(),lin_first=True)]
if hidden_dim: # i.e. is not None
            z_fc += [nn.Linear(hidden,im_size*BASE*BASE)] # should the hidden layer have activation and/or batchnorm?
#z_fc += [LinBnDrop(hidden,im_size*n_blocks*n_blocks,bn=True,p=0.0,act=nn.ReLU(),lin_first=True)]
nfs = [3] + [2**i*BASE for i in range(n_blocks+1)]
nfs.reverse()
n = len(nfs)
modules = [UpsampleBlock(nfs[i]) for i in range(n - 2)]
self.decoder = nn.Sequential(*z_fc,
ResizeBatch(im_size,BASE,BASE),
*modules,
ConvLayer(nfs[-2],nfs[-1],
ks=1,padding=0, norm_type=None, #act_cls=nn.Sigmoid) )
act_cls=partial(SigmoidRange, *out_range)))
store_attr('latent_dim, hidden_dim,im_size,out_range')
def forward(self, z):
return self.decoder(z)
# Cell
def build_AE_encoder(arch_body,enc_dim, hidden_dim=None, im_size=IMG_SIZE):
"wrapper to sequential-ize AEEncoder class"
encoder = AEEncoder(arch_body,enc_dim=enc_dim, hidden_dim=hidden_dim, im_size=im_size)
return nn.Sequential(*list(encoder.children()))
def build_AE_decoder(hidden_dim=None, latent_dim=128, im_size=IMG_SIZE,out_range=OUT_RANGE):
"wrapper to sequential-ize AEDecoder class"
decoder = AEDecoder(hidden_dim=hidden_dim, latent_dim=latent_dim, im_size=im_size,out_range=out_range)
return nn.Sequential(*list(decoder.children()))
# Cell
class AE(Module):
def __init__(self,enc_parts,hidden_dim=None, latent_dim=128, im_size=IMG_SIZE,out_range=OUT_RANGE):
"""
        inputs:
        enc_parts: (enc_arch, enc_feats, name) tuple, e.g. from get_encoder_parts()
        hidden_dim: optional linear layer width between the encoder and the latent layer
        latent_dim: size of the latent code
        im_size, out_range: image size and SigmoidRange output bounds
"""
enc_arch,enc_feats,name = enc_parts
BASE = im_size//2**5
        enc_dim = enc_feats * BASE**2 # 2**(3*3) * (im_size//32)**2 #(output of resnet) #12800
#encoder
self.encoder = build_AE_encoder(enc_arch,enc_dim=enc_dim, hidden_dim=hidden_dim, im_size=im_size)
in_dim = enc_dim if hidden_dim is None else hidden_dim
# AE Bottleneck
self.bn = LatentLayer(in_dim,latent_dim)
#decoder
self.decoder = build_AE_decoder(hidden_dim=hidden_dim, latent_dim=latent_dim, im_size=im_size,out_range=out_range)
store_attr('name,enc_dim, in_dim,hidden_dim,latent_dim,im_size,out_range') # do i need all these?
def decode(self, z):
return self.decoder(z)
def encode(self, x):
h = self.encoder(x)
return self.bn(h)
def forward(self, x):
#z, mu, logvar = self.encode(x)
# h = self.encoder(x)
# z, mu, logvar = self.bn(h) # reparam happens in the VAE layer
# x_hat = self.decoder(z)
z,mu,logvar = self.encode(x) # z and mu are the same for
x_hat = self.decode(z)
latents = torch.stack([mu,logvar],dim=-1)
return x_hat, latents # assume dims are [batch,latent_dim,concat_dim]
# Cell
# class L1LatentReg(Module):
# """
# add alpha?
# """
# def __init__(self, batchmean=False):
# """
# reduction 'sum', else 'batchmean'
# """
# l_one = self._L1mean if batchmean else self._L1
# store_attr('batchmean,l_one')
# def _L1(self, a):
# return a.abs().sum()
# def _L1mean(self, a):
# return a.abs().sum(dim=1).mean()
# def forward(self,z):
# return self.l_one(z)
class AELoss(Module):
"""
wrapper for loss_func which deals with potential annealed kl_weight
    does MSE (or L1) with 'sum' reduction
    'batchmean' divides the summed error by the batch size
simple L1 regularizer on latent dimension
"""
def __init__(self, batchmean=False, alpha=1.0,useL1=False):
"""
reduction 'sum'
"""
pix_loss = MSELossFlat(reduction='sum') if not useL1 else L1LossFlat(reduction='sum')
store_attr('pix_loss,alpha,batchmean')
def l_one_reg(self,pix_dim,z):
l_one = z.abs().sum()
l_one *= (3*pix_dim*pix_dim)/z.size()[1]
return l_one
def forward(self, preds, *target):
"""
pred =(x_hat,KLD,kl_weight) #mu,log_var, kl_weight)
target is x (original)
"""
# this handles the annealed kl_weight and passing the mu,logvar around we added...
if(len(preds) == 3):
x_hat, latents, _ = preds
        else: #if len(preds) == 2: # we should never get here... unless we delete the callback
x_hat, latents = preds
z, _ = latents.split(1,dim=2)
bs = latents.size()[0]
#note: both mse and l1_reg are summing errors over batches, and pixels or latents
pix_err = self.pix_loss(x_hat, target[0])
pix_dim = x_hat.size()[-1]
l1_reg = self.l_one_reg(pix_dim,z)
total = pix_err + self.alpha*l1_reg
total *= (1./bs) if self.batchmean else 1.0
return total
# Cell
class MyMetric(Metric):
"meta-class for simple average over epoch metric quantities"
def reset(self):
"Clear all targs and preds"
self.vals = []
@property
def value(self):
return np.array(self.vals).mean()
class L1LatentReg(MyMetric):
"Latent Regularizer with sum reduction and optinal batchmean scaling"
def __init__(self,batchmean=False,alpha=1.0):
vals = []
store_attr('vals,batchmean,alpha')
def accumulate(self, learn):
# pix_dim = to_detach(learn.y[0].size()[-1])
latents = to_detach(learn.pred[1])
bs = latents.size()[0]
z, _ = latents.split(1,dim=2)
#nll = torch.abs(recon_x - x).mean()
l_one = z.abs().sum()
# l_one *= (3*pix_dim*pix_dim)/z.size()[1]
l_one *= (self.alpha/bs) if self.batchmean else self.alpha
self.vals.append(l_one)
# Cell
def KLD(mu,logvar):
"KLD helper which sum across latents, but not batches"
return -0.5 * torch.sum(1 + logvar - mu*mu - logvar.exp(),1)
class KLDiv(Module):
"""
Module for computing the KL Divergence from a unit normal distribution.
'batchmean' option sums first and averages over batches
"""
def __init__(self, batchmean=False):
"""
reduction 'sum', else 'batchmean'
"""
store_attr('batchmean')
def __KLD(self,mu,logvar):
"KLD helper which sum across latents, but not batches"
return -0.5 * torch.sum(1 + logvar - mu*mu - logvar.exp(),1)
def forward(self, mu, logvar):
"""
        mu, logvar: latent mean and log-variance tensors of shape [batch, latent_dim]
"""
kld = self.__KLD(mu,logvar)
kld = kld.mean() if self.batchmean else kld.sum()
return kld
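# --- Hedged sanity-check sketch (added for illustration; not part of the original module) ---
# For mu=0, logvar=0 (an exact unit normal) the analytic KL divergence is 0, and with
# mu=1 it is 0.5 per latent dimension; assumes torch is in scope via the star imports above.
def _demo_kld_check():
    mu = torch.zeros(4, 8)          # batch of 4, latent dim 8
    logvar = torch.zeros(4, 8)
    assert torch.allclose(KLD(mu, logvar), torch.zeros(4))
    assert torch.allclose(KLD(torch.ones(4, 8), logvar), torch.full((4,), 4.0))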
# Cell
class L2MeanMetric(MyMetric):
"Mean square error"
def __init__(self): self.vals = []
def accumulate(self, learn):
x = to_detach(learn.y[0])
recon_x = to_detach(learn.pred[0])
nll = (recon_x - x).pow(2).mean()
#nll = torch.mean((recon_x - x)**2)
self.vals.append(nll)
class L1MeanMetric(MyMetric):
"Mean absolute error"
def __init__(self): self.vals = []
def accumulate(self, learn):
x = to_detach(learn.y[0])
recon_x = to_detach(learn.pred[0])
#nll = torch.abs(recon_x - x).mean()
nll = (recon_x - x).abs().mean()
self.vals.append(nll)
class L2Metric(MyMetric):
"Sum square error"
def __init__(self): self.vals = []
def accumulate(self, learn):
x = to_detach(learn.y[0])
recon_x = to_detach(learn.pred[0])
nll = (recon_x - x).pow(2).sum()
#nll = torch.mean((recon_x - x)**2)
self.vals.append(nll)
class L1Metric(MyMetric):
"Sum absolute error"
def __init__(self): self.vals = []
def accumulate(self, learn):
x = to_detach(learn.y[0])
recon_x = to_detach(learn.pred[0])
#nll = torch.abs(recon_x - x).mean()
nll = (recon_x - x).abs().sum()
self.vals.append(nll)
class L2BMeanMetric(MyMetric):
"Summed square error average across batch "
def __init__(self): self.vals = []
def accumulate(self, learn):
x = to_detach(learn.y[0])
recon_x = to_detach(learn.pred[0])
nll = (recon_x - x).pow(2).sum(dim=[1,2,3]).mean()
#nll = torch.mean((recon_x - x)**2)
self.vals.append(nll)
class L1BMeanMetric(MyMetric):
"Summed abs error average across batch "
def __init__(self): self.vals = []
def accumulate(self, learn):
x = to_detach(learn.y[0])
recon_x = to_detach(learn.pred[0])
#nll = torch.abs(recon_x - x).mean()
nll = (recon_x - x).abs().sum(dim=[1,2,3]).mean()
self.vals.append(nll)
class KLWeightMetric(MyMetric):
"Injected KLD weighting"
def __init__(self): self.vals = []
def accumulate(self, learn):
#kl = learn.model.kl_weight
kl = learn.opt.hypers[0]['kl_weight']
self.vals.append(to_detach(kl))
class RawKLDMetric(MyMetric):
"KLD Metric, `batchmean` averages across batches"
def __init__(self,batchmean=False):
vals = []
_KLD = KLDiv(batchmean=batchmean)
store_attr('vals,batchmean,_KLD')
def accumulate(self, learn):
latents = learn.pred[1]
mu, logvar = latents.split(1,dim=2)
kld = self._KLD(mu,logvar)
self.vals.append(to_detach(kld))
class WeightedKLDMetric(MyMetric):
"""weighted KLD Metric, `batchmean` averages across batches
the "effective" KLD regularization in e.g. a 𝜷-BAE
"""
def __init__(self,batchmean=False,alpha=1.0):
vals = []
_KLD = KLDiv(batchmean=batchmean)
store_attr('vals,batchmean,alpha,_KLD')
def accumulate(self, learn):
latents = learn.pred[1]
mu, logvar = latents.split(1,dim=2)
kld = self.alpha*self._KLD(mu,logvar)
self.vals.append(to_detach(kld))
# latents = to_detach(learn.pred[1])
# mu, logvar = latents.split(1,dim=2)
# kld = _KLD(mu,logvar).mean() if self.batchmean else _KLD(mu,logvar).sum()
# self.vals.append(self.alpha*kld)
class MuMetric(MyMetric):
"average latent value (e.g. avg(`mu`)"
def __init__(self): self.vals = []
def accumulate(self, learn):
latents = to_detach(learn.pred[1])
mu, logvar = latents.split(1,dim=2)
self.vals.append(mu.mean())
class MuSDMetric(MyMetric):
"standard deviation of latent 𝝁 value (e.g. std(`mu`) )"
def __init__(self): self.vals = []
def accumulate(self, learn):
latents = to_detach(learn.pred[1])
mu, logvar = latents.split(1,dim=2)
self.vals.append(mu.std())
class StdMetric(MyMetric):
"average of latent 𝝈 value (e.g. std(exp(.5*`logvar`) )"
def __init__(self): self.vals = []
def accumulate(self, learn):
latents = learn.pred[1]
mu, logvar = latents.split(1,dim=2)
std = torch.exp(0.5 * logvar).mean()
self.vals.append(to_detach(std))
class StdSDMetric(MyMetric):
"standard deviation of latent 𝝈 value (e.g. std(exp(.5*`logvar`) )"
def __init__(self): self.vals = []
def accumulate(self, learn):
latents = learn.pred[1]
mu, logvar = latents.split(1,dim=2)
std = torch.exp(0.5 * logvar).std()
self.vals.append(to_detach(std))
class LogvarMetric(MyMetric):
"average of latent log(𝝈*𝝈) value (e.g. mean(`logvar`))"
def __init__(self): self.vals = []
def accumulate(self, learn):
latents = to_detach(learn.pred[1])
mu, logvar = latents.split(1,dim=2)
self.vals.append(logvar.mean())
class LogvarSDMetric(MyMetric):
"standard deviation of latent log(𝝈*𝝈) value (e.g. std(`logvar`)"
def __init__(self): self.vals = []
def accumulate(self, learn):
latents = to_detach(learn.pred[1])
mu, logvar = latents.split(1,dim=2)
self.vals.append(logvar.std())
# Cell
def default_AE_metrics(alpha,batchmean,useL1):
"long-ish default list of metrics for the AE"
first = L2BMeanMetric() if batchmean else L2MeanMetric()
    second = L1BMeanMetric() if batchmean else L1MeanMetric()
if useL1: first,second = second,first
metrics = [first,
L1LatentReg(batchmean=batchmean,alpha=alpha),
MuMetric(),
StdMetric(),
LogvarMetric(),
second,
WeightedKLDMetric(batchmean=batchmean,alpha=alpha),
MuSDMetric(),
LogvarSDMetric(),
]
return metrics
def short_AE_metrics(alpha,batchmean,useL1):
"short default list of metrics for the AE"
first = L2BMeanMetric() if batchmean else L2MeanMetric()
    second = L1BMeanMetric() if batchmean else L1MeanMetric()
if useL1: first,second = second,first
metrics = [first,
L1LatentReg(batchmean=batchmean,alpha=alpha),
MuMetric(),
]
return metrics
# Cell
class AnnealedLossCallback(Callback):
"injects `kl_weight` for access during loss function calculation"
def after_pred(self):
kl_weight = self.learn.pred[0].new(1)
kl_weight[0] = self.opt.hypers[0]['kl_weight'] if 'kl_weight' in self.opt.hypers[0].keys() else 1.0
self.learn.pred = self.learn.pred + (kl_weight,)
def after_batch(self):
pred, latents, _ = self.learn.pred
self.learn.pred = (pred,latents)
def default_KL_anneal_in():
"reasonable default for 'warming up' the KL Div"
return combine_scheds([ .7, .3], [SchedCos(0,1), SchedNo(1,1)])
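# --- Hedged sanity-check sketch (added for illustration; not part of the original module) ---
# The combined schedule ramps kl_weight from 0 to 1 with a cosine over the first 70% of
# training and then holds it at 1; relies on fastai's combine_scheds/SchedCos/SchedNo
# being in scope via the module's star imports.
def _demo_kl_anneal():
    sched = default_KL_anneal_in()
    assert abs(sched(0.0)) < 1e-5            # ramp starts at 0
    assert sched(0.2) < sched(0.5)           # cosine ramp is increasing
    assert abs(sched(0.9) - 1.0) < 1e-5      # flat at 1 for the last 30%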
# Cell
def bn_splitter(m):
"splits all the batchnorm layers out"
def _bn_splitter(l, g1, g2):
if isinstance(l, nn.BatchNorm2d): g2 += l.parameters()
elif hasattr(l, 'weight'): g1 += l.parameters()
for ll in l.children(): _bn_splitter(ll, g1, g2)
g1,g2 = [],[]
_bn_splitter(m[0], g1, g2)
g2 += m[1:].parameters()
return g1,g2
def resnetVAE_split(m):
"simple splitter to freeze the non batch norm pre-trained encoder"
to_freeze, dont_freeze = bn_splitter(m.encoder)
#return L(to_freeze, dont_freeze + params(m.bn)+params(m.dec[:2]), params(m.dec[2:]))
return L(to_freeze, dont_freeze + params(m.bn)+params(m.decoder))
#return L(fz, nofz + params(m.bn)+params(m.dec[:6]), params(m.dec[6:]))
def AE_split(m):
"generic splitter for my AE classes- BVAE & AE & MMDVAE."
to_freeze, dont_freeze = bn_splitter(m.encoder)
return L(to_freeze, dont_freeze + params(m.bn)+params(m.decoder))
# Cell
#### TODO: refactor this to take a "BLOCK" input so we can have either ConvLayer or ResBlock pieces
def get_conv_parts(im_size=IMG_SIZE):
"""
make a simple convolutional ladder encoder
"""
n_blocks = 5
BASE = im_size//2**5
nfs = [3]+[(2**i)*BASE for i in range(n_blocks)]
n = len(nfs)
modules = [ConvLayer(nfs[i],nfs[i+1],
ks=5,stride=2,padding=2) for i in range(n - 1)]
return modules,nfs[-1],'vanilla'
def get_pretrained_parts(arch=resnet18):
"this works for mobilnet_v2, resnet, and xresnet"
cut = model_meta[arch]['cut']
name = arch.__name__
arch = arch(pretrained=True)
enc_arch = list(arch.children())[:cut]
enc_feats = 512
return enc_arch, enc_feats, name
def get_encoder_parts(enc_type='vanilla',im_size=IMG_SIZE):
encoder_parts = get_conv_parts(im_size=im_size) if isinstance(enc_type,str) else get_pretrained_parts(arch=enc_type)
return encoder_parts # returns enc_arch,enc_dim,arch.__name__
# Cell
class VAELinear(Module):
"maps hidden (input) features to two latents (mu and logvar)"
def __init__(self,in_features,latent_features):
self.mu_linear = nn.Linear(in_features,latent_features)
self.logvar_linear = nn.Linear(in_features,latent_features)
def forward(self,h):
#h = self.fc_in(h)
return self.mu_linear(h), self.logvar_linear(h)
class VAELayer(Module):
"""
The VAE : in_features to latent_features through
the "Variational" magic: "reparamaterization trick"
"""
def __init__(self,in_features,latent_features):
self.mu_logvar = VAELinear(in_features,latent_features)
#
def reparam(self,mu,logvar):
# should we pass through a deterministic code when not training?
if False: return mu # self.training
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
z = mu + eps * std
return z
def forward(self,h):
mu,logvar = self.mu_logvar(h)
#logvar = F.softplus(logvar) # force logvar>0
z = self.reparam(mu,logvar) # adds the noise by the reparam trick
return z, mu, logvar
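# --- Hedged illustration of the reparameterisation trick (added; not part of the original module) ---
# z = mu + eps * sigma keeps sampling differentiable w.r.t. mu and logvar because the
# randomness lives entirely in eps ~ N(0, 1); assumes torch and fastai's Module are in
# scope via the star imports above.
def _demo_reparam():
    layer = VAELayer(in_features=16, latent_features=4)
    z, mu, logvar = layer(torch.randn(2, 16))
    assert z.shape == mu.shape == logvar.shape == (2, 4)
    z.sum().backward()   # gradients flow back to the two linear heads through mu and logvar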
# Cell
### TODO: refactor the BVAE and AE to a single architecture... with a "sample" function
class BVAE(AE):
"""
simple VAE made with an encoder passed in, and some builder function for the Latent (VAE reparam trick) and decoder
"""
def __init__(self,enc_parts,hidden_dim=None, latent_dim=128, im_size=IMG_SIZE,out_range=OUT_RANGE):
"""
inputs:
        enc_parts: (enc_arch, enc_feats, name) tuple, e.g. from get_encoder_parts()
        hidden_dim: optional linear layer width between the encoder and the VAE bottleneck
        latent_dim: size of the latent code
        im_size, out_range: image size and SigmoidRange output bounds
"""
enc_arch,enc_feats,name = enc_parts
# encoder
# arch,cut = xresnet18(pretrained=True),-4
# enc_arch = list(arch.children())[:cut]
BASE = im_size//2**5
        enc_dim = enc_feats * BASE**2 # 2**(3*3) * (im_size//32)**2 #(output of resnet) #12800
self.encoder = build_AE_encoder(enc_arch,enc_dim=enc_dim, hidden_dim=hidden_dim, im_size=im_size)
in_dim = enc_dim if hidden_dim is None else hidden_dim
# VAE Bottleneck
self.bn = VAELayer(in_dim,latent_dim)
#decoder
self.decoder = build_AE_decoder(hidden_dim=hidden_dim, latent_dim=latent_dim, im_size=im_size,out_range=out_range)
store_attr('name,enc_dim, in_dim,hidden_dim,latent_dim,im_size,out_range') # do i need all these?
# def decode(self, z):
# return self.decoder(z)
# def encode(self, x):
# h = self.encoder(x)
# z, mu, logvar = self.bn(h) # reparam happens in the VAE layer
# return z, mu, logvar
# def forward(self, x):
# #z, mu, logvar = self.encode(x)
# # h = self.encoder(x)
# # z, mu, logvar = self.bn(h) # reparam happens in the VAE layer
# # x_hat = self.decoder(z)
# z,mu,logvar = self.encode(x)
# x_hat = self.decode(z)
# latents = torch.stack([mu,logvar],dim=-1)
# return x_hat, latents # assume dims are [batch,latent_dim,concat_dim]
# # AE
# def decode(self, z):
# return self.decoder(z)
# def encode(self, x):
# h = self.encoder(x)
# return self.bn(h)
# def forward(self, x):
# """
# pass the "latents" out to keep the learn mechanics consistent...
# """
# h = self.encoder(x)
# z,logvar = self.bn(h)
# x_hat = self.decoder(z)
# latents = torch.stack([z,logvar] ,dim=-1)
# return x_hat , latents
# Cell
# called `after_batch`
class BVAELoss(Module):
"""
Measures how well we have created the original image,
plus the KL Divergence with the unit normal distribution
    batchmean option sums first and averages over batches (for smaller total error magnitudes; cosmetic)
"""
def __init__(self, batchmean=False, alpha=1.0,useL1=False):
"""
reduction 'sum', else 'batchmean'
"""
pix_loss = MSELossFlat(reduction='sum') if not useL1 else L1LossFlat(reduction='sum')
_KLD = KLDiv(batchmean=False) # force to full sum
store_attr('pix_loss,alpha,batchmean,_KLD')
def forward(self, preds, *target):
"""
pred =(x_hat,KLD,kl_weight) #mu,log_var, kl_weight)
target is x (original)
"""
# this handles the annealed kl_weight and passing the mu,logvar around we added...
if(len(preds) == 3):
x_hat, latents, kl_weight = preds
else: #if len(preds) == 2: # we should never get here... unless we delete the callback
x_hat, latents = preds
kl_weight = x_hat[0].new(1)
kl_weight[0] = 1.0
mu, logvar = latents.split(1,dim=2)
#note: both mse and KLD are summing errors over batches, and pixels or latents
pix_err = self.pix_loss(x_hat, target[0])
kld_err = self.alpha * self._KLD(mu,logvar).sum() #_KLD doesn't sum over batches by default
total = (pix_err + kld_err*kl_weight)
if self.batchmean: total *= (1./mu.size()[0])
return total
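# --- Editor's illustrative sketch (not part of the original source) ---
# The KLDiv term used above is assumed to follow the standard closed form for a
# diagonal Gaussian against the unit normal:
#     KL = -0.5 * sum(1 + logvar - mu^2 - exp(logvar))
# The helper below is a reference-only restatement of that formula, summed over
# both the latent dimensions and the batch.
def _example_kld_closed_form(mu, logvar):
    return -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())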
# Cell
def default_VAE_metrics(alpha,batchmean,useL1):
"long default list of metrics for the VAE"
first = L2BMeanMetric() if batchmean else L2Metric()
second = L1BMeanMetric() if batchmean else L1Metric()
if useL1: first,second = second,first
metrics = [first,
MuMetric(),
StdMetric(),
LogvarMetric(),
WeightedKLDMetric(batchmean=batchmean,alpha=alpha),
KLWeightMetric(),
second,
MuSDMetric(),
StdSDMetric(),
LogvarSDMetric(),
]
return metrics
def short_VAE_metrics(alpha,batchmean,useL1):
"short default list of metrics for the AE"
first = L2BMeanMetric() if batchmean else L2MeanMetric()
    second = L1BMeanMetric() if batchmean else L1MeanMetric()
if useL1: first,second = second,first
metrics = [first,
MuMetric(),
StdMetric(),
LogvarMetric(),
WeightedKLDMetric(batchmean=batchmean,alpha=alpha)
]
return metrics
# Cell
def gaussian_kernel(a, b):
"helper for computing MMD"
dim1_1, dim1_2 = a.shape[0], b.shape[0]
depth = a.shape[1]
a = a.view(dim1_1, 1, depth)
b = b.view(1, dim1_2, depth)
a_core = a.expand(dim1_1, dim1_2, depth)
b_core = b.expand(dim1_1, dim1_2, depth)
numerator = (a_core - b_core).pow(2).mean(2)/depth
return torch.exp(-numerator)
def MMD(a, b):
"Max Mean Discrepancy"
return gaussian_kernel(a, a).mean() + gaussian_kernel(b, b).mean() - 2*gaussian_kernel(a, b).mean()
def rawMMD(a, b):
"_raw_ values from gauss kernals, assuming that and b have the same shape"
return gaussian_kernel(a, a) + gaussian_kernel(b, b) - 2*gaussian_kernel(a, b)
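# --- Editor's illustrative sketch (not part of the original source) ---
# Hypothetical MMD usage with assumed shapes: MMD(a, b) stays near zero when
# both sample sets come from the same distribution and grows as they diverge.
def _example_mmd_usage():
    same = MMD(torch.randn(64, 16), torch.randn(64, 16))           # ~0
    shifted = MMD(torch.randn(64, 16), torch.randn(64, 16) + 3.0)  # noticeably larger
    return same, shifted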
# the MMDVAE is built on the basic AE architecture
class MMDVAE(AE): pass
class MaxMeanDiscrepancy(Module):
"""
MMD
add alpha?
"""
def __init__(self, batchmean=False):
"""
reduction 'mean', else 'batchmean' means only over batch
"""
MMD = self._MMDsum if batchmean else self._MMDmean
store_attr('batchmean,MMD')
def _gaus_ker(self,a, b):
"gaussian kernal"
dim1_1, dim1_2 = a.shape[0], b.shape[0]
depth = a.shape[1]
numerator = 1.0/depth
a = a.view(dim1_1, 1, depth)
b = b.view(1, dim1_2, depth)
a_core = a.expand(dim1_1, dim1_2, depth)
b_core = b.expand(dim1_1, dim1_2, depth)
a_m_b = a_core - b_core
numerator *= (a_m_b*a_m_b).mean(2)
#numerator = (a_core - b_core).pow(2).mean(2) /depth
return torch.exp(-numerator)
def _rawMMD(self, a, b):
return self._gaus_ker(a, a) + self._gaus_ker(b, b) - 2*self._gaus_ker(a, b)
def _MMDmean(self, a, b):
return self._rawMMD( a, b).mean()
def _MMDsum(self, a, b):
return self._rawMMD( a, b).sum()
def forward(self,true_samples, latent):
# bs = latents.size()[0]
# latent_dim = z.size()[1]
# true_samples = torch.randn((bs,latent_dim), requires_grad=False).cuda()
mmd = self.MMD(true_samples, latent)
return mmd
class MMDLoss(Module):
"""
Measures mean square error of prediction and original image,
regularized by MMD.
    Note: using reduction = 'mean' because it keeps the regularization relatively potent (i.e. pixels>>latents)
"""
def __init__(self, batchmean=False, alpha=1.0,useL1=False):
"""
reduction 'sum', else 'batchmean'
"""
if batchmean:
pix_loss = MSELossFlat(reduction='sum') if not useL1 else L1LossFlat(reduction='sum')
#mmd = _MMDsum
else:
pix_loss = MSELossFlat(reduction='mean') if not useL1 else L1LossFlat(reduction='mean')
#mmd = _MMD
mmd = MaxMeanDiscrepancy(batchmean=batchmean)
store_attr('pix_loss,alpha,batchmean,mmd')
def forward(self, preds, *target):
"""
pred =(x_hat,KLD,kl_weight) #mu,log_var, kl_weight)
target is x (original)
"""
# this handles the annealed kl_weight and passing the mu,logvar around we added...
if(len(preds) == 3):
x_hat, latents, kl_weight = preds
        else: #if len(preds) == 2: # we should never get here... unless we delete the callback
x_hat, latents = preds
kl_weight = x_hat[0].new(1)
kl_weight[0] = 1.0
z, _ = latents.split(1,dim=2)
#note: both mse and KLD are summing errors over batches, and pixels or latents
pix_err = self.pix_loss(x_hat, target[0])
bs = latents.size()[0]
latent_dim = z.size()[1]
true_samples = torch.randn((bs,latent_dim), requires_grad=False).cuda()
mmd_loss = self.mmd(true_samples, z) * self.alpha
total = (pix_err + mmd_loss*kl_weight)
total *= (1./bs) if self.batchmean else 1.0
return total
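# --- Editor's illustrative sketch (not part of the original source) ---
# Shapes expected by MMDLoss.forward, with made-up values: preds mirrors the
# model output (x_hat, latents[, kl_weight]) where latents stacks (z, logvar)
# on the last dim. The .cuda() calls mirror the hard-coded .cuda() above, so a
# GPU is assumed here.
def _example_mmdloss_usage():
    loss_fn = MMDLoss(batchmean=True, alpha=1.0)
    x_hat = torch.rand(4, 3, 64, 64).cuda()
    latents = torch.randn(4, 128, 2).cuda()      # (z, logvar) stacked on dim 2
    target = torch.rand(4, 3, 64, 64).cuda()
    return loss_fn((x_hat, latents), target)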
class MMDMetric(MyMetric):
def __init__(self,batchmean=False,alpha=1.0):
vals = []
#mmd = _MMDsum if batchmean else _MMD
mmd = MaxMeanDiscrepancy(batchmean=batchmean)
store_attr('vals,batchmean,alpha,mmd')
def accumulate(self, learn):
latents = learn.pred[1]
z, _ = latents.split(1,dim=2)
bs = latents.size()[0]
latent_dim = z.size()[1]
true_samples = torch.randn((bs,latent_dim), requires_grad=False).cuda()
mmd_loss = self.mmd(true_samples, z)
mmd_loss *= (self.alpha/bs) if self.batchmean else self.alpha
self.vals.append(to_detach(mmd_loss))
# export
def short_MMEVAE_metrics(alpha,batchmean,useL1):
"short list of metrics for the VAE"
first = L2BMeanMetric() if batchmean else L2MeanMetric()
second = L1BMeanMetric() if batchmean else L1MeanMetric()
if useL1: first,second = second,first
metrics = [first,
MMDMetric(batchmean=batchmean,alpha=alpha),
MuMetric(),
MuSDMetric(),
]
return metrics
def default_MMEVAE_metrics(alpha,batchmean,useL1):
"long default list of metrics for the VAE"
first = L2BMeanMetric() if batchmean else L2MeanMetric()
second = L1BMeanMetric() if batchmean else L1MeanMetric()
if useL1: first,second = second,first
metrics = [first,
MMDMetric(batchmean=batchmean,alpha=alpha),
MuMetric(),
StdMetric(),
second,
MuSDMetric(),
LogvarMetric(),
L1LatentReg(batchmean=batchmean,alpha=alpha),
WeightedKLDMetric(batchmean=batchmean,alpha=alpha),
LogvarSDMetric()]
return metrics
# Cell
class UpsampleResBlock(Module):
def __init__(self, up_in_c:int, final_div:bool=True, blur:bool=False, **kwargs):
"""
        Upsampling using PixelShuffle_ICNR and ResBlocks
- up_in_c : "Upsample input channel"
"""
self.shuf = PixelShuffle_ICNR(up_in_c, up_in_c//2, blur=blur, **kwargs)
ni = up_in_c//2
nf = ni if final_div else ni//2
self.conv1 = ResBlock(1,ni, nf, **kwargs) # since we'll apply it by hand...
self.conv2 = ResBlock(1,nf, nf, **kwargs)
def forward(self, up_in:Tensor) -> Tensor:
up_out = self.shuf(up_in)
return self.conv2(self.conv1(up_out))
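# --- Editor's illustrative sketch (not part of the original source) ---
# With final_div=True (the default) UpsampleResBlock doubles the spatial size
# and halves the channel count; the sizes below are arbitrary assumptions.
def _example_upsample_resblock():
    block = UpsampleResBlock(64)
    x = torch.randn(2, 64, 8, 8)
    return block(x).shape                        # expected: (2, 32, 16, 16)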
def get_resblockencoder_parts(enc_type='vanilla',im_size=IMG_SIZE):
"""
make a simple (hence 'vanilla') convolutional ladder encoder with ResBlock parts
"""
n_blocks = 5
BASE = im_size//2**5
nfs = [3]+[(2**i)*BASE for i in range(n_blocks)]
n = len(nfs)
modules = [ResBlock(1, nfs[i],nfs[i+1],
stride=2, act_cls=Mish) for i in range(n - 1)]
return modules,nfs[-1],'resblock'
# def build_ResBlockAE_decoder(hidden_dim=2048, latent_dim=128, im_size=IMG_SIZE,out_range=OUT_RANGE):
# BASE = im_size//2**5
# #store_attr('enc_dim,latent_dim, hidden_dim,im_size')
# #decoder
# n_blocks = 5
# nfs = [3] + [2**i*n_blocks for i in range(n_blocks+1)]
# nfs.reverse()
# n = len(nfs)
# modules = [UpsampleResBlock(nfs[i]) for i in range(n - 2)]
# decoder = nn.Sequential( LinBnDrop(latent_dim,hidden_dim,
# bn=True,# batch normalizaiton shouldn't be a problem here
# p=0.0,act=nn.ReLU(),lin_first=True),
# LinBnDrop(hidden_dim,im_size*n_blocks*n_blocks,
# bn=True,# batch normalizaiton shouldn't be a problem here
# p=0.0,act=nn.ReLU(),lin_first=True),
# ResizeBatch(im_size,n_blocks,n_blocks),
# *modules,
# ResBlock(1,nfs[-2],nfs[-1],
# ks=1,padding=0, norm_type=None, #act_cls=nn.Sigmoid) )
# act_cls=partial(SigmoidRange, *out_range)))
# return decoder
class ResBlockAEDecoder(Module):
def __init__(self, hidden_dim=None, latent_dim=128, im_size=IMG_SIZE,out_range=OUT_RANGE):
"""
Decoder Module made of ResBlocks returning the latent representation back into an "image"
latent_dim - dimension of latent representation
hidden_dim - optional additional linear layer between the latent and decoder
im_size - passed to make sure we are scaling back to the right size
        out_range - ensures the output is on the same scale as the _normalized_ input image
"""
#decoder
n_blocks = 5
BASE = im_size//2**5
hidden = im_size*BASE*BASE if hidden_dim is None else hidden_dim
z_fc = [nn.Linear(latent_dim,hidden)]
if hidden_dim: # i.e. is not None
z_fc += [nn.Linear(hidden,im_size*BASE*BASE)]
nfs = [3] + [2**i*BASE for i in range(n_blocks+1)]
nfs.reverse()
n = len(nfs)
modules = [UpsampleResBlock(nfs[i]) for i in range(n - 2)]
self.decoder = nn.Sequential(*z_fc,
ResizeBatch(im_size,BASE,BASE),
*modules,
ResBlock(1,nfs[-2],nfs[-1],
ks=1,padding=0, norm_type=None, #act_cls=nn.Sigmoid) )
act_cls=partial(SigmoidRange, *out_range)))
store_attr('latent_dim, hidden_dim,im_size,out_range')
def forward(self, z):
z = self.decoder(z)
return z
def build_ResBlockAE_decoder(hidden_dim=None, latent_dim=128, im_size=IMG_SIZE,out_range=OUT_RANGE):
"wrapper to sequential-ize ResBlockAEDecoder class"
decoder = ResBlockAEDecoder(hidden_dim=hidden_dim, latent_dim=latent_dim, im_size=im_size,out_range=out_range)
return nn.Sequential(*list(decoder.children()))
class ResBlockAE(AE):
def __init__(self,enc_parts,hidden_dim=None, latent_dim=128, im_size=IMG_SIZE,out_range=OUT_RANGE,isVAE=False):
"""
inputs:
enc_parts - encoder architecture
latent_dim - dimension of latent representation
hidden_dim - optional additional linear layer between the latent and decoder
im_size - passed to make sure we are scaling back to the right size
            out_range - ensures the output is on the same scale as the _normalized_ input image
            isVAE - switch for the type of latent representation
"""
enc_arch,enc_feats,name = enc_parts
BASE = im_size//2**5
enc_dim = enc_feats * BASE**2 # 2**(3*3) * (im_size//32)**2 #(output of resneet) #12800
#encoder
self.encoder = build_AE_encoder(enc_arch,enc_dim=enc_dim, hidden_dim=hidden_dim, im_size=im_size)
in_dim = enc_dim if hidden_dim is None else hidden_dim
# AE Bottleneck
latent = VAELayer if isVAE else LatentLayer
self.bn = latent(in_dim,latent_dim)
#decoder
self.decoder = build_ResBlockAE_decoder(hidden_dim=hidden_dim, latent_dim=latent_dim, im_size=im_size,out_range=out_range)
store_attr('name,enc_dim, in_dim,hidden_dim,latent_dim,im_size,out_range') # do i need all these?
# def decode(self, z):
# return self.decoder(z)
# def encode(self, x):
# h = self.encoder(x)
# return self.bn(h)
# def forward(self, x):
# """
# pass the "latents" out to keep the learn mechanics consistent...
# """
# h = self.encoder(x)
# z,logvar = self.bn(h)
# x_reconst = self.decoder(z)
# latents = torch.stack([z,logvar] ,dim=-1)
# return x_reconst , latents
| 32.761662 | 130 | 0.6078 |
a2650f14eeff11acfb48e5fc2ae1f58d08729539
| 14,951 |
py
|
Python
|
google/ads/google_ads/v3/proto/resources/bidding_strategy_pb2.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v3/proto/resources/bidding_strategy_pb2.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | null | null | null |
google/ads/google_ads/v3/proto/resources/bidding_strategy_pb2.py
|
andy0937/google-ads-python
|
cb5da7f4a75076828d1fc3524b08cc167670435a
|
[
"Apache-2.0"
] | 1 |
2020-03-13T00:14:31.000Z
|
2020-03-13T00:14:31.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/ads/googleads_v3/proto/resources/bidding_strategy.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.ads.google_ads.v3.proto.common import bidding_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_bidding__pb2
from google.ads.google_ads.v3.proto.enums import bidding_strategy_status_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_bidding__strategy__status__pb2
from google.ads.google_ads.v3.proto.enums import bidding_strategy_type_pb2 as google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_bidding__strategy__type__pb2
from google.api import resource_pb2 as google_dot_api_dot_resource__pb2
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='google/ads/googleads_v3/proto/resources/bidding_strategy.proto',
package='google.ads.googleads.v3.resources',
syntax='proto3',
serialized_options=_b('\n%com.google.ads.googleads.v3.resourcesB\024BiddingStrategyProtoP\001ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\242\002\003GAA\252\002!Google.Ads.GoogleAds.V3.Resources\312\002!Google\\Ads\\GoogleAds\\V3\\Resources\352\002%Google::Ads::GoogleAds::V3::Resources'),
serialized_pb=_b('\n>google/ads/googleads_v3/proto/resources/bidding_strategy.proto\x12!google.ads.googleads.v3.resources\x1a\x32google/ads/googleads_v3/proto/common/bidding.proto\x1a\x41google/ads/googleads_v3/proto/enums/bidding_strategy_status.proto\x1a?google/ads/googleads_v3/proto/enums/bidding_strategy_type.proto\x1a\x19google/api/resource.proto\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1cgoogle/api/annotations.proto\"\x89\x07\n\x0f\x42iddingStrategy\x12\x15\n\rresource_name\x18\x01 \x01(\t\x12\'\n\x02id\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12*\n\x04name\x18\x04 \x01(\x0b\x32\x1c.google.protobuf.StringValue\x12^\n\x06status\x18\x0f \x01(\x0e\x32N.google.ads.googleads.v3.enums.BiddingStrategyStatusEnum.BiddingStrategyStatus\x12X\n\x04type\x18\x05 \x01(\x0e\x32J.google.ads.googleads.v3.enums.BiddingStrategyTypeEnum.BiddingStrategyType\x12\x33\n\x0e\x63\x61mpaign_count\x18\r \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12?\n\x1anon_removed_campaign_count\x18\x0e \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x43\n\x0c\x65nhanced_cpc\x18\x07 \x01(\x0b\x32+.google.ads.googleads.v3.common.EnhancedCpcH\x00\x12?\n\ntarget_cpa\x18\t \x01(\x0b\x32).google.ads.googleads.v3.common.TargetCpaH\x00\x12X\n\x17target_impression_share\x18\x30 \x01(\x0b\x32\x35.google.ads.googleads.v3.common.TargetImpressionShareH\x00\x12\x41\n\x0btarget_roas\x18\x0b \x01(\x0b\x32*.google.ads.googleads.v3.common.TargetRoasH\x00\x12\x43\n\x0ctarget_spend\x18\x0c \x01(\x0b\x32+.google.ads.googleads.v3.common.TargetSpendH\x00:h\xea\x41\x65\n(googleads.googleapis.com/BiddingStrategy\x12\x39\x63ustomers/{customer}/biddingStrategies/{bidding_strategy}B\x08\n\x06schemeB\x81\x02\n%com.google.ads.googleads.v3.resourcesB\x14\x42iddingStrategyProtoP\x01ZJgoogle.golang.org/genproto/googleapis/ads/googleads/v3/resources;resources\xa2\x02\x03GAA\xaa\x02!Google.Ads.GoogleAds.V3.Resources\xca\x02!Google\\Ads\\GoogleAds\\V3\\Resources\xea\x02%Google::Ads::GoogleAds::V3::Resourcesb\x06proto3')
,
dependencies=[google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_bidding__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_bidding__strategy__status__pb2.DESCRIPTOR,google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_bidding__strategy__type__pb2.DESCRIPTOR,google_dot_api_dot_resource__pb2.DESCRIPTOR,google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
_BIDDINGSTRATEGY = _descriptor.Descriptor(
name='BiddingStrategy',
full_name='google.ads.googleads.v3.resources.BiddingStrategy',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='resource_name', full_name='google.ads.googleads.v3.resources.BiddingStrategy.resource_name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='google.ads.googleads.v3.resources.BiddingStrategy.id', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='google.ads.googleads.v3.resources.BiddingStrategy.name', index=2,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='status', full_name='google.ads.googleads.v3.resources.BiddingStrategy.status', index=3,
number=15, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='google.ads.googleads.v3.resources.BiddingStrategy.type', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='campaign_count', full_name='google.ads.googleads.v3.resources.BiddingStrategy.campaign_count', index=5,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='non_removed_campaign_count', full_name='google.ads.googleads.v3.resources.BiddingStrategy.non_removed_campaign_count', index=6,
number=14, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='enhanced_cpc', full_name='google.ads.googleads.v3.resources.BiddingStrategy.enhanced_cpc', index=7,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_cpa', full_name='google.ads.googleads.v3.resources.BiddingStrategy.target_cpa', index=8,
number=9, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_impression_share', full_name='google.ads.googleads.v3.resources.BiddingStrategy.target_impression_share', index=9,
number=48, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_roas', full_name='google.ads.googleads.v3.resources.BiddingStrategy.target_roas', index=10,
number=11, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='target_spend', full_name='google.ads.googleads.v3.resources.BiddingStrategy.target_spend', index=11,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=_b('\352Ae\n(googleads.googleapis.com/BiddingStrategy\0229customers/{customer}/biddingStrategies/{bidding_strategy}'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
_descriptor.OneofDescriptor(
name='scheme', full_name='google.ads.googleads.v3.resources.BiddingStrategy.scheme',
index=0, containing_type=None, fields=[]),
],
serialized_start=375,
serialized_end=1280,
)
_BIDDINGSTRATEGY.fields_by_name['id'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_BIDDINGSTRATEGY.fields_by_name['name'].message_type = google_dot_protobuf_dot_wrappers__pb2._STRINGVALUE
_BIDDINGSTRATEGY.fields_by_name['status'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_bidding__strategy__status__pb2._BIDDINGSTRATEGYSTATUSENUM_BIDDINGSTRATEGYSTATUS
_BIDDINGSTRATEGY.fields_by_name['type'].enum_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_enums_dot_bidding__strategy__type__pb2._BIDDINGSTRATEGYTYPEENUM_BIDDINGSTRATEGYTYPE
_BIDDINGSTRATEGY.fields_by_name['campaign_count'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_BIDDINGSTRATEGY.fields_by_name['non_removed_campaign_count'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_BIDDINGSTRATEGY.fields_by_name['enhanced_cpc'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_bidding__pb2._ENHANCEDCPC
_BIDDINGSTRATEGY.fields_by_name['target_cpa'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_bidding__pb2._TARGETCPA
_BIDDINGSTRATEGY.fields_by_name['target_impression_share'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_bidding__pb2._TARGETIMPRESSIONSHARE
_BIDDINGSTRATEGY.fields_by_name['target_roas'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_bidding__pb2._TARGETROAS
_BIDDINGSTRATEGY.fields_by_name['target_spend'].message_type = google_dot_ads_dot_googleads__v3_dot_proto_dot_common_dot_bidding__pb2._TARGETSPEND
_BIDDINGSTRATEGY.oneofs_by_name['scheme'].fields.append(
_BIDDINGSTRATEGY.fields_by_name['enhanced_cpc'])
_BIDDINGSTRATEGY.fields_by_name['enhanced_cpc'].containing_oneof = _BIDDINGSTRATEGY.oneofs_by_name['scheme']
_BIDDINGSTRATEGY.oneofs_by_name['scheme'].fields.append(
_BIDDINGSTRATEGY.fields_by_name['target_cpa'])
_BIDDINGSTRATEGY.fields_by_name['target_cpa'].containing_oneof = _BIDDINGSTRATEGY.oneofs_by_name['scheme']
_BIDDINGSTRATEGY.oneofs_by_name['scheme'].fields.append(
_BIDDINGSTRATEGY.fields_by_name['target_impression_share'])
_BIDDINGSTRATEGY.fields_by_name['target_impression_share'].containing_oneof = _BIDDINGSTRATEGY.oneofs_by_name['scheme']
_BIDDINGSTRATEGY.oneofs_by_name['scheme'].fields.append(
_BIDDINGSTRATEGY.fields_by_name['target_roas'])
_BIDDINGSTRATEGY.fields_by_name['target_roas'].containing_oneof = _BIDDINGSTRATEGY.oneofs_by_name['scheme']
_BIDDINGSTRATEGY.oneofs_by_name['scheme'].fields.append(
_BIDDINGSTRATEGY.fields_by_name['target_spend'])
_BIDDINGSTRATEGY.fields_by_name['target_spend'].containing_oneof = _BIDDINGSTRATEGY.oneofs_by_name['scheme']
DESCRIPTOR.message_types_by_name['BiddingStrategy'] = _BIDDINGSTRATEGY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
BiddingStrategy = _reflection.GeneratedProtocolMessageType('BiddingStrategy', (_message.Message,), dict(
DESCRIPTOR = _BIDDINGSTRATEGY,
__module__ = 'google.ads.googleads_v3.proto.resources.bidding_strategy_pb2'
,
__doc__ = """A bidding strategy.
Attributes:
resource_name:
The resource name of the bidding strategy. Bidding strategy
resource names have the form: ``customers/{customer_id}/biddi
ngStrategies/{bidding_strategy_id}``
id:
The ID of the bidding strategy.
name:
The name of the bidding strategy. All bidding strategies
within an account must be named distinctly. The length of
this string should be between 1 and 255, inclusive, in UTF-8
bytes, (trimmed).
status:
The status of the bidding strategy. This field is read-only.
type:
The type of the bidding strategy. Create a bidding strategy by
setting the bidding scheme. This field is read-only.
campaign_count:
The number of campaigns attached to this bidding strategy.
This field is read-only.
non_removed_campaign_count:
The number of non-removed campaigns attached to this bidding
strategy. This field is read-only.
scheme:
The bidding scheme. Only one can be set.
enhanced_cpc:
A bidding strategy that raises bids for clicks that seem more
likely to lead to a conversion and lowers them for clicks
where they seem less likely.
target_cpa:
A bidding strategy that sets bids to help get as many
conversions as possible at the target cost-per-acquisition
(CPA) you set.
target_impression_share:
A bidding strategy that automatically optimizes towards a
desired percentage of impressions.
target_roas:
A bidding strategy that helps you maximize revenue while
averaging a specific target Return On Ad Spend (ROAS).
target_spend:
A bid strategy that sets your bids to help get as many clicks
as possible within your budget.
""",
# @@protoc_insertion_point(class_scope:google.ads.googleads.v3.resources.BiddingStrategy)
))
_sym_db.RegisterMessage(BiddingStrategy)
DESCRIPTOR._options = None
_BIDDINGSTRATEGY._options = None
# @@protoc_insertion_point(module_scope)
| 64.167382 | 2,003 | 0.790917 |
9087ea119282e62a36ef2014624cd3e608c47ce4
| 2,046 |
py
|
Python
|
datawire/model/forms.py
|
arc64/datawi.re
|
adb95e974ee776617530348ead735db7b623273c
|
[
"MIT"
] | 2 |
2016-04-09T23:36:32.000Z
|
2016-07-18T23:27:34.000Z
|
datawire/model/forms.py
|
arc64/datawi.re
|
adb95e974ee776617530348ead735db7b623273c
|
[
"MIT"
] | null | null | null |
datawire/model/forms.py
|
arc64/datawi.re
|
adb95e974ee776617530348ead735db7b623273c
|
[
"MIT"
] | 1 |
2018-12-27T22:10:41.000Z
|
2018-12-27T22:10:41.000Z
|
import colander
from colander import Invalid # noqa
PERSON = 'Person'
COMPANY = 'Company'
ORGANIZATION = 'Organization'
OTHER = 'Other'
CATEGORIES = [PERSON, COMPANY, ORGANIZATION, OTHER]
class Ref(object):
def deserialize(self, node, cstruct):
if cstruct is colander.null:
return colander.null
value = self.decode(cstruct)
if value is None:
raise colander.Invalid(node, 'Missing')
return value
def cstruct_children(self, node, cstruct):
return []
class UserRef(Ref):
def decode(self, cstruct):
from aleph.model.user import User
if isinstance(cstruct, User):
return cstruct
if isinstance(cstruct, (basestring, int)):
return User.by_id(cstruct)
if isinstance(cstruct, dict):
return self.decode(cstruct.get('id'))
return None
class CollectionRef(Ref):
def decode(self, cstruct):
from datawire.model.collection import Collection
if isinstance(cstruct, Collection):
return cstruct
if isinstance(cstruct, (basestring, int)):
            return Collection.by_id(cstruct)
if isinstance(cstruct, dict):
return self.decode(cstruct.get('id'))
return None
class UserForm(colander.MappingSchema):
email = colander.SchemaNode(colander.String(),
validator=colander.Email())
login = colander.SchemaNode(colander.String())
class CollectionForm(colander.MappingSchema):
slug = colander.SchemaNode(colander.String())
public = colander.SchemaNode(colander.Boolean())
class EntitySelectors(colander.SequenceSchema):
selector = colander.SchemaNode(colander.String())
class EntityForm(colander.MappingSchema):
label = colander.SchemaNode(colander.String())
category = colander.SchemaNode(colander.String(),
validator=colander.OneOf(CATEGORIES))
selectors = EntitySelectors()
collection = colander.SchemaNode(CollectionRef())
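# Editor's illustrative sketch (not part of the original source): typical
# colander usage for the schemas above. The input dict is made up, and
# deserialize() raises colander.Invalid when a value fails validation.
def _example_user_form():
    schema = UserForm()
    return schema.deserialize({'email': '[email protected]', 'login': 'alice'})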
| 27.648649 | 72 | 0.655425 |
aacd4dd3418c279bc6886ab5653e5478d6cfa0c7
| 10,865 |
py
|
Python
|
PVGeo/filters/voxelize.py
|
banesullivan/PVGeophysics
|
1ce6311c4e5b195a4a31e3e0b1eb968f44aa34d2
|
[
"BSD-3-Clause"
] | 1 |
2017-08-17T17:38:46.000Z
|
2017-08-17T17:38:46.000Z
|
PVGeo/filters/voxelize.py
|
banesullivan/PVGeophysics
|
1ce6311c4e5b195a4a31e3e0b1eb968f44aa34d2
|
[
"BSD-3-Clause"
] | null | null | null |
PVGeo/filters/voxelize.py
|
banesullivan/PVGeophysics
|
1ce6311c4e5b195a4a31e3e0b1eb968f44aa34d2
|
[
"BSD-3-Clause"
] | 1 |
2018-06-06T05:56:17.000Z
|
2018-06-06T05:56:17.000Z
|
"""This module provides a complicated algorithm for making voxels out of regularly
gridded points. Considering that this algorithm is rather complex, we are keeping
it in its own module until we can simplify it, clean up the code, and make it
capable of handling non-uniformly gridded points
"""
__all__ = [
'VoxelizePoints',
]
__displayname__ = 'Voxelize'
import numpy as np
import vtk
from vtk.numpy_interface import dataset_adapter as dsa
from vtk.util import numpy_support as nps
from .. import _helpers, interface
from ..base import FilterBase
from ..version import check_numpy
from .xyz import RotationTool
###############################################################################
class VoxelizePoints(FilterBase):
"""This makes a ``vtkUnstructuredGrid`` of scattered points given voxel
sizes as input arrays. This assumes that the data is at least 2-Dimensional
on the XY Plane.
"""
__displayname__ = 'Voxelize Points'
__category__ = 'filter'
def __init__(self, **kwargs):
FilterBase.__init__(
self,
nInputPorts=1,
inputType='vtkPointSet',
nOutputPorts=1,
outputType='vtkUnstructuredGrid',
)
self.__dx = kwargs.get('dx', None)
self.__dy = kwargs.get('dy', None)
self.__dz = kwargs.get('dz', None)
self.__estimate_grid = kwargs.get('estimate', True)
self.__safe = kwargs.get('safe', 10.0)
self.__unique = kwargs.get('unique', True)
self.__tolerance = kwargs.get('tolerance', None)
self.__angle = kwargs.get('angle', 0.0)
def add_field_data(self, grid):
"""An internal helper to add the recovered information as field data"""
# Add angle
a = vtk.vtkDoubleArray()
a.SetName('Recovered Angle (Deg.)')
a.SetNumberOfValues(1)
a.SetValue(0, np.rad2deg(self.__angle))
grid.GetFieldData().AddArray(a)
# Add cell sizes
s = vtk.vtkDoubleArray()
s.SetName('Recovered Cell Sizes')
s.SetNumberOfComponents(3)
s.InsertNextTuple3(self.__dx, self.__dy, self.__dz)
grid.GetFieldData().AddArray(s)
return grid
@staticmethod
def add_cell_data(grid, arr, name):
"""Add a NumPy array as cell data to the given grid input"""
c = interface.convert_array(arr, name=name)
grid.GetCellData().AddArray(c)
return grid
def estimate_uniform_spacing(self, x, y, z):
"""This assumes that the input points make up some sort of uniformly
spaced grid on at least an XY plane.
"""
# TODO: implement ability to rotate around Z axis (think PoroTomo vs UTM)
# TODO: implement way to estimate rotation
if not (len(x) == len(y) == len(z)):
raise AssertionError(
'Must have same number of coordinates for all components.'
)
num = len(x)
if num == 1:
# Only one point.. use safe
return x, y, z, self.__safe, self.__safe, self.__safe, 0.0
r = RotationTool()
xr, yr, zr, dx, dy, angle = r.estimate_and_rotate(x, y, z)
self.__angle = angle
uz = np.diff(np.unique(z))
if len(uz) > 0:
dz = np.average(uz)
else:
dz = self.__safe
self.__dx = dx
self.__dy = dy
self.__dz = dz
return xr, yr, zr
def points_to_grid(self, xo, yo, zo, dx, dy, dz, grid=None):
"""Convert XYZ points to a ``vtkUnstructuredGrid``."""
if not check_numpy(alert='warn'):
return grid
if grid is None:
grid = vtk.vtkUnstructuredGrid()
# TODO: Check dtypes on all arrays. Need to be floats
if self.__estimate_grid:
x, y, z = self.estimate_uniform_spacing(xo, yo, zo)
else:
x, y, z = xo, yo, zo
dx, dy, dz = self.__dx, self.__dy, self.__dz
if isinstance(dx, np.ndarray) and len(dx) != len(x):
raise _helpers.PVGeoError(
'X-Cell spacings are not properly defined for all points.'
)
if isinstance(dy, np.ndarray) and len(dy) != len(y):
raise _helpers.PVGeoError(
'Y-Cell spacings are not properly defined for all points.'
)
if isinstance(dz, np.ndarray) and len(dz) != len(z):
raise _helpers.PVGeoError(
'Z-Cell spacings are not properly defined for all points.'
)
n_cells = len(x)
# Generate cell nodes for all points in data set
# - Bottom
c_n1 = np.stack(((x - dx / 2), (y - dy / 2), (z - dz / 2)), axis=1)
c_n2 = np.stack(((x + dx / 2), (y - dy / 2), (z - dz / 2)), axis=1)
c_n3 = np.stack(((x - dx / 2), (y + dy / 2), (z - dz / 2)), axis=1)
c_n4 = np.stack(((x + dx / 2), (y + dy / 2), (z - dz / 2)), axis=1)
# - Top
c_n5 = np.stack(((x - dx / 2), (y - dy / 2), (z + dz / 2)), axis=1)
c_n6 = np.stack(((x + dx / 2), (y - dy / 2), (z + dz / 2)), axis=1)
c_n7 = np.stack(((x - dx / 2), (y + dy / 2), (z + dz / 2)), axis=1)
c_n8 = np.stack(((x + dx / 2), (y + dy / 2), (z + dz / 2)), axis=1)
# - Concatenate
all_nodes = np.concatenate(
(c_n1, c_n2, c_n3, c_n4, c_n5, c_n6, c_n7, c_n8), axis=0
)
pts = vtk.vtkPoints()
cells = vtk.vtkCellArray()
if self.__unique:
# Search for unique nodes and use the min cell size as the tolerance
if self.__tolerance is None:
TOLERANCE = np.min([dx, dy]) / 2.0
else:
TOLERANCE = self.__tolerance
# Round XY plane by the tolerance
txy = np.around(all_nodes[:, 0:2] / TOLERANCE)
all_nodes[:, 0:2] = txy
unique_nodes, ind_nodes = np.unique(all_nodes, return_inverse=True, axis=0)
unique_nodes[:, 0:2] *= TOLERANCE
all_nodes = unique_nodes
else:
ind_nodes = np.arange(0, len(all_nodes), dtype=int)
all_nodes[:, 0:2] = RotationTool.rotate(all_nodes[:, 0:2], -self.__angle)
if self.__estimate_grid:
self.add_field_data(grid)
# Add unique nodes as points in output
pts.SetData(interface.convert_array(all_nodes))
# Add cell vertices
j = np.multiply(np.tile(np.arange(0, 8, 1), n_cells), n_cells)
arridx = np.add(j, np.repeat(np.arange(0, n_cells, 1, dtype=int), 8))
ids = ind_nodes[arridx].reshape((n_cells, 8))
cells_mat = np.concatenate(
(np.ones((ids.shape[0], 1), dtype=np.int_) * ids.shape[1], ids), axis=1
).ravel()
cells = vtk.vtkCellArray()
cells.SetNumberOfCells(n_cells)
cells.SetCells(
n_cells, nps.numpy_to_vtk(cells_mat, deep=True, array_type=vtk.VTK_ID_TYPE)
)
# Set the output
grid.SetPoints(pts)
grid.SetCells(vtk.VTK_VOXEL, cells)
return grid
@staticmethod
def _copy_arrays(pdi, pdo):
"""internal helper to copy arrays from point data to cell data in the voxels."""
for i in range(pdi.GetPointData().GetNumberOfArrays()):
arr = pdi.GetPointData().GetArray(i)
_helpers.add_array(pdo, 1, arr) # adds to CELL data
return pdo
def RequestData(self, request, inInfo, outInfo):
"""Used by pipeline to generate output"""
# Get input/output of Proxy
pdi = self.GetInputData(inInfo, 0, 0)
pdo = self.GetOutputData(outInfo, 0)
# Perform task
wpdi = dsa.WrapDataObject(pdi)
pts = wpdi.Points
x, y, z = pts[:, 0], pts[:, 1], pts[:, 2]
self.points_to_grid(x, y, z, self.__dx, self.__dy, self.__dz, grid=pdo)
# Now append data to grid
self._copy_arrays(pdi, pdo)
return 1
#### Setters and Getters ####
def set_safe_size(self, safe):
"""A voxel size to use if a spacing cannot be determined for an axis"""
if self.__safe != safe:
self.__safe = safe
self.Modified()
def set_delta_x(self, dx):
"""Set the X cells spacing
Args:
dx (float or np.array(floats)): the spacing(s) for the cells in
the X-direction
"""
self.__dx = dx
self.Modified()
def set_delta_y(self, dy):
"""Set the Y cells spacing
Args:
dy (float or np.array(floats)): the spacing(s) for the cells in
the Y-direction
"""
self.__dy = dy
self.Modified()
def set_delta_z(self, dz):
"""Set the Z cells spacing
Args:
dz (float or np.array(floats)): the spacing(s) for the cells in
the Z-direction
"""
self.__dz = dz
self.set_safe_size(np.min(dz))
self.Modified()
def set_deltas(self, dx, dy, dz):
"""Set the cell spacings for each axial direction
Args:
dx (float or np.array(floats)): the spacing(s) for the cells in
the X-direction
dy (float or np.array(floats)): the spacing(s) for the cells in
the Y-direction
dz (float or np.array(floats)): the spacing(s) for the cells in
the Z-direction
"""
self.set_delta_x(dx)
self.set_delta_y(dy)
self.set_delta_z(dz)
def set_estimate_grid(self, flag):
"""Set a flag on whether or not to estimate the grid spacing/rotation"""
if self.__estimate_grid != flag:
self.__estimate_grid = flag
self.Modified()
def set_unique(self, flag):
"""Set a flag on whether or not to try to elimate non unique elements"""
if self.__unique != flag:
self.__unique = flag
self.Modified()
def get_angle(self, degrees=True):
"""Returns the recovered angle if set to recover the input grid. If the
input points are rotated, then this angle will reflect a close
approximation of that rotation.
Args:
            degrees (bool): Whether to return decimal degrees instead of radians.
"""
if degrees:
return np.rad2deg(self.__angle)
return self.__angle
def get_recovered_angle(self, degrees=True):
"""DEPRECATED: use `get_angle`"""
return self.get_angle(degrees=degrees)
def set_angle(self, angle):
"""Set the rotation angle manually"""
if self.__angle != angle:
self.__angle = angle
self.Modified()
def get_spacing(self):
"""Get the cell spacings"""
return (self.__dx, self.__dy, self.__dz)
###############################################################################
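# --- Editor's illustrative sketch (not part of the original source) ---
# The core idea behind points_to_grid, reduced to plain NumPy: for each point
# (x, y, z) and cell sizes (dx, dy, dz), build the 8 corner nodes of its voxel.
# The function name and shapes are assumptions for illustration, not part of
# the PVGeo API.
def _example_voxel_corners(x, y, z, dx, dy, dz):
    offsets = np.array([[sx, sy, sz]
                        for sz in (-1, 1) for sy in (-1, 1) for sx in (-1, 1)])
    centers = np.stack((x, y, z), axis=1)              # (n_cells, 3)
    half = np.array([dx, dy, dz]) / 2.0
    # (n_cells, 8, 3): the corner nodes of every voxel
    return centers[:, None, :] + offsets[None, :, :] * half[None, None, :]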
| 35.048387 | 88 | 0.562632 |
e899e3c50d64f528d791584d14d1c8dc2afe943c
| 3,608 |
py
|
Python
|
eval/embed_files_with_issues.py
|
TobiasGleissner/embed_modal
|
746e3efb6f4c6cf70cc5b67f9c8f2ea3657328ec
|
[
"BSD-3-Clause"
] | 5 |
2018-06-20T14:52:55.000Z
|
2022-02-21T15:51:56.000Z
|
eval/embed_files_with_issues.py
|
TobiasGleissner/embed_modal
|
746e3efb6f4c6cf70cc5b67f9c8f2ea3657328ec
|
[
"BSD-3-Clause"
] | 5 |
2017-12-08T12:27:46.000Z
|
2018-03-27T06:32:49.000Z
|
eval/embed_files_with_issues.py
|
leoprover/embed_modal
|
746e3efb6f4c6cf70cc5b67f9c8f2ea3657328ec
|
[
"BSD-3-Clause"
] | 1 |
2018-04-05T20:02:31.000Z
|
2018-04-05T20:02:31.000Z
|
from common import embed,accumulate_csv,create_dict_from_problems,iterate_dict,filename_to_path
from check_consistency import check_consistency_iteration_callback
from starexec_create_configurations import get_transformation_abbreviation
import sys
from pathlib import Path
def main(qmltp_dir,out_dir,csv_file_list):
bin_treelimitedrun = "/home/tg/embed_modal/eval/TreeLimitedRun"
bin_embed = []
bin_embed.append("java -jar /home/tg/embed_modal/embed/target/embed-1.0-SNAPSHOT-shaded.jar") # dev
bin_embed.append("java -jar /home/tg/oldemb/e_before_type/embed/target/embed-1.0-SNAPSHOT-shaded.jar") #_before_type
#bin_embed.append("java -jar /home/tg/oldemb/e_after_type/embed/target/embed-1.0-SNAPSHOT-shaded.jar") #_after_type
#quantification = "$varying"
#system = "$modal_system_S4"
#sem = {"system":system,"quantification":quantification,"consequence":"$local","constants":"$rigid"}
#params = ["semantic_constant_quantification","semantic_cumulative_quantification","semantic_decreasing_quantification","semantic_modality_axiomatization"]
#params_old = ["semantic_monotonic_quantification","semantic_antimonotonic_quantification","semantic_modality_axiomatization"]
#params_very_old = []
#problemfile = "/home/tg/embed_modal/eval/datasets/qmltp_thf_standard/GLC/GLC414+1.p"
problem_list = accumulate_csv(csv_file_list)
problem_dict = create_dict_from_problems(problem_list)
filename_to_issue = {}
iterate_dict(problem_dict, check_consistency_iteration_callback, filename_to_issue)
Path(out_dir).mkdir(exist_ok=True)
for filename in filename_to_issue:
issue_list = filename_to_issue[filename]
for issue_dict in issue_list:
quantification = issue_dict['quantification']
system = issue_dict['system']
sem = {"system":system,"quantification":quantification,"consequence":"$local","constants":"$rigid"}
params = []
params.append(["semantic_constant_quantification","semantic_cumulative_quantification","semantic_decreasing_quantification","semantic_modality_axiomatization"])
params.append([])
#params.append(["semantic_monotonic_quantification","semantic_antimonotonic_quantification","semantic_modality_axiomatization"])
if quantification != "$varying":
continue
if system == "$modal_system_K":
continue
problemfile = filename_to_path(qmltp_dir,filename)
print("currently processing",problemfile,system,quantification)
with open(problemfile,"r") as fh:
problem = fh.read()
for i in range(len(bin_embed)):
outfile = Path(out_dir) / (filename + "_" + system.replace("$modal_system","") + "_" + quantification.replace("$","") + "_" + str(i) + ".p")
if outfile.exists():
print(str(outfile) + " already exists.")
continue
e = embed(bin_treelimitedrun, bin_embed[i],problem,params[i],sem,120,120)
with open(outfile,"w+")as fw:
fw.write(e['embedded_problem'])
outOriginal = Path(out_dir) / (filename + "_" + system.replace("$modal_system","") + "_" + quantification.replace("$","") + "_" + str(i) + "_original" + ".p")
with open(outOriginal,"w+")as fw:
fw.write("% " + str(sem))
fw.write(problem)
if __name__ == "__main__":
main(sys.argv[1],sys.argv[2],sys.argv[3:])
| 58.193548 | 178 | 0.668792 |
ddc69f5423d8baf1799f81ef5ac2159fc95581fd
| 548 |
py
|
Python
|
cowrie/commands/__init__.py
|
johnfoo/cowrie
|
d74d96a2d5355f0fb790ad8041e97420fb61371d
|
[
"BSD-3-Clause"
] | 3 |
2018-11-15T07:20:24.000Z
|
2021-06-10T03:34:56.000Z
|
cowrie/commands/__init__.py
|
johnfoo/cowrie
|
d74d96a2d5355f0fb790ad8041e97420fb61371d
|
[
"BSD-3-Clause"
] | null | null | null |
cowrie/commands/__init__.py
|
johnfoo/cowrie
|
d74d96a2d5355f0fb790ad8041e97420fb61371d
|
[
"BSD-3-Clause"
] | 8 |
2015-12-17T05:41:51.000Z
|
2019-09-27T05:06:37.000Z
|
# Copyright (c) 2009 Upi Tamminen <[email protected]>
# See the COPYRIGHT file for more information
__all__ = [
'adduser',
'apt',
'base',
'busybox',
'curl',
'dd',
'env',
'ethtool',
'free',
'fs',
'ftpget',
'gcc',
'ifconfig',
'iptables',
'last',
'ls',
'nc',
'netstat',
'nohup',
'ping',
'scp',
'service',
'sleep',
'ssh',
'sudo',
'tar',
'uname',
'ulimit',
'wget',
'which',
'perl',
'uptime',
'python',
'tftp'
]
| 13.7 | 54 | 0.441606 |
1be3b18524360cf0e3aeb82804d6276040e4a64e
| 1,023 |
py
|
Python
|
tools/dev/position_control.py
|
gbalke/bldc-controller
|
99e4e71d5bdc0c7c7901d886aa7709c66db8b718
|
[
"MIT"
] | null | null | null |
tools/dev/position_control.py
|
gbalke/bldc-controller
|
99e4e71d5bdc0c7c7901d886aa7709c66db8b718
|
[
"MIT"
] | null | null | null |
tools/dev/position_control.py
|
gbalke/bldc-controller
|
99e4e71d5bdc0c7c7901d886aa7709c66db8b718
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import sys
import numpy as np
from comms import *
import serial
import time
port = sys.argv[1]
s = serial.Serial(port=port, baudrate=COMM_DEFAULT_BAUD_RATE, timeout=0.001)
print s.BAUDRATES
client = BLDCControllerClient(s)
client.leaveBootloader(0x01)
print("hello")
s.flush()
time.sleep(0.1)
print("hello")
client.writeRegisters(0x01, 0x0101, 1, struct.pack('<H', 9346) )
print("hello")
client.writeRegisters(0x01, 0x0106, 1, struct.pack('<f', 0) )
print("hello")
client.writeRegisters(0x01, 0x0102, 1, struct.pack('<B', 0) )
print("hello")
position_setpoint = 5000
next_step = time.time() + 1
while True:
duty_cycle = 0.0
angle = struct.unpack('<H', client.readRegisters(0x01, 0x100, 1))[0]
duty_cycle = min(max((angle - position_setpoint) * 0.001, -1), 1)
client.writeRegisters(0x01, 0x0106, 1, struct.pack('<f', duty_cycle) )
if time.time() > next_step:
print("hellO")
position_setpoint += 1000
position_setpoint %= 2 ** 14
next_step += 1
| 25.575 | 76 | 0.684262 |
df2c1601232d98489d178a10df9a0e0462c92af3
| 20,954 |
py
|
Python
|
tests/acceptance/steps/multi_file_steps.py
|
aoxiangflysky/onedata
|
5fe5783f4fb23e90e6567d638a165a0bfcc2f663
|
[
"Apache-2.0"
] | 61 |
2016-04-19T23:51:37.000Z
|
2022-01-02T22:28:53.000Z
|
tests/acceptance/steps/multi_file_steps.py
|
aoxiangflysky/onedata
|
5fe5783f4fb23e90e6567d638a165a0bfcc2f663
|
[
"Apache-2.0"
] | 57 |
2016-08-23T13:36:47.000Z
|
2022-02-08T14:30:30.000Z
|
tests/acceptance/steps/multi_file_steps.py
|
aoxiangflysky/onedata
|
5fe5783f4fb23e90e6567d638a165a0bfcc2f663
|
[
"Apache-2.0"
] | 7 |
2016-08-26T06:08:58.000Z
|
2019-11-16T19:22:28.000Z
|
"""Module implements common steps for operation on files (both regular files
and directories)in multi-client environment.
"""
__author__ = "Jakub Kudzia"
__copyright__ = "Copyright (C) 2015 ACK CYFRONET AGH"
__license__ = "This software is released under the MIT license cited in " \
"LICENSE.txt"
from tests.utils.acceptance_utils import *
from tests.utils.utils import assert_generic, assert_
from tests.utils.client_utils import (ls, mv, chmod, stat, rm, touch,
create_file, osrename, setxattr,
getxattr, listxattr, removexattr)
from tests.utils.docker_utils import run_cmd
import os
import stat as stat_lib
import json, jsondiff
import pytest
@when(parsers.re('(?P<user>\w+) updates (?P<files>.*) timestamps on'
' (?P<client_node>.*)'))
def touch_file(user, files, client_node, context):
touch_file_base(user, files, client_node, context)
@when(parsers.re('(?P<user>\w+) fails to update (?P<files>.*) timestamps '
'on (?P<client_node>.*)'))
def touch_file_fail(user, files, client_node, context):
touch_file_base(user, files, client_node, context, should_fail=True)
def touch_file_base(user, files, client_node, context, should_fail=False):
user = context.get_user(user)
client = user.get_client(client_node)
files = list_parser(files)
for file in files:
file_path = client.absolute_path(file)
def condition():
try:
touch(client, file_path)
except OSError:
return True if should_fail else False
else:
return False if should_fail else True
assert_(client.perform, condition)
@when(parsers.re('(?P<user>\w+) creates regular files (?P<files>.*) '
'on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) creates regular files (?P<files>.*) '
'on (?P<client_node>.*)'))
def create_reg_file(user, files, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
files = list_parser(files)
for file in files:
file_path = client.absolute_path(file)
def condition():
create_file(client, file_path)
assert_(client.perform, condition)
@when(parsers.re('(?P<user>\w+) creates children files of (?P<parent_dir>.*) '
'with names in range \[(?P<lower>.*), (?P<upper>.*)\) on '
'(?P<client_node>.*)'), converters=dict(lower=int, upper=int))
@then(parsers.re('(?P<user>\w+) creates children files of (?P<parent_dir>.*) '
'with names in range \[(?P<lower>.*), (?P<upper>.*)\) on '
'(?P<client_node>.*)'), converters=dict(lower=int, upper=int))
def create_many(user, lower, upper, parent_dir, client_node, context):
for i in range(lower, upper):
new_file = os.path.join(parent_dir, str(i))
create_reg_file(user, make_arg_list(new_file), client_node, context)
@wt(parsers.re('(?P<user>\w+) can stat (?P<files>.*) in (?P<path>.*)'
' on (?P<client_node>.*)'))
def stat_present(user, path, files, client_node, context):
client = context.get_client(user, client_node)
path = client.absolute_path(path)
files = list_parser(files)
def condition():
for f in files:
stat(client, os.path.join(path, f))
assert_(client.perform, condition)
@wt(parsers.re('(?P<user>\w+) can\'t stat (?P<files>.*) in (?P<path>.*) on '
'(?P<client_node>.*)'))
def stat_absent(user, path, files, client_node, context):
client = context.get_client(user, client_node)
path = client.absolute_path(path)
files = list_parser(files)
def condition():
for f in files:
with pytest.raises(OSError,
message = 'File {} exists in {}'.format(f, path)):
stat(client, os.path.join(path, f))
assert_(client.perform, condition)
@when(parsers.re('(?P<directory>.*) is empty for (?P<user>\w+) on (?P<client_node>.*)'))
@then(parsers.re('(?P<directory>.*) is empty for (?P<user>\w+) on (?P<client_node>.*)'))
def ls_empty(directory, user, client_node, context):
client = context.get_client(user, client_node)
dir_path = client.absolute_path(directory)
def condition():
assert len(ls(client, dir_path)) == 0
assert_(client.perform, condition)
@when(parsers.re('(?P<user>\w+) sees (?P<files>.*) in (?P<path>.*) '
'on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) sees (?P<files>.*) in (?P<path>.*) '
'on (?P<client_node>.*)'))
def ls_present(user, files, path, client_node, context):
client = context.get_client(user, client_node)
path = client.absolute_path(path)
files = list_parser(files)
def condition():
listed_files = ls(client, path)
for file in files:
assert file in listed_files
assert_(client.perform, condition)
@when(parsers.re('(?P<user>\w+) lists only children of (?P<parent_dir>.*) with names'
' in range \[(?P<lower>.*), (?P<upper>.*)\) on (?P<client_node>.*)'),
converters=dict(lower=int,upper=int))
@then(parsers.re('(?P<user>\w+) lists only children of (?P<parent_dir>.*) with names'
' in range \[(?P<lower>.*), (?P<upper>.*)\) on (?P<client_node>.*)'),
converters=dict(lower=int,upper=int))
def ls_children(user, parent_dir, lower, upper, client_node, context):
client = context.get_client(user, client_node)
path = client.absolute_path(parent_dir)
files_num = upper - lower
def condition():
listed_files = ls(client, path)
assert len(listed_files) == files_num
for i in range(lower, upper):
assert str(i) in listed_files
assert_(client.perform, condition)
@when(parsers.re('(?P<user>\w+) doesn\'t see (?P<files>.*) in (?P<path>.*) '
'on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) doesn\'t see (?P<files>.*) in (?P<path>.*) '
'on (?P<client_node>.*)'))
def ls_absent(user, files, path, client_node, context):
client = context.get_client(user, client_node)
path = client.absolute_path(path)
files = list_parser(files)
def condition():
listed_files = ls(client, path)
for file in files:
assert file not in listed_files
assert_(client.perform, condition)
@when(parsers.re('(?P<user>\w+) moves (?P<file1>.*) to (?P<file2>.*) '
'using shell command on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) moves (?P<file1>.*) to (?P<file2>.*) '
'using shell command on (?P<client_node>.*)'))
def shell_move(user, file1, file2, client_node, context):
shell_move_base(user, file1, file2, client_node, context)
@when(parsers.re('(?P<user>\w+) fails to move (?P<file1>.*) to (?P<file2>.*) '
'using shell command on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) fails to move (?P<file1>.*) to (?P<file2>.*) '
'using shell command on (?P<client_node>.*)'))
def shell_move_fail(user, file1, file2, client_node, context):
shell_move_base(user, file1, file2, client_node, context, should_fail=True)
def shell_move_base(user, file1, file2, client_node, context, should_fail=False):
user = context.get_user(user)
client = user.get_client(client_node)
src = client.absolute_path(file1)
dest = client.absolute_path(file2)
def condition():
mv(client, src, dest)
cmd = "mv (?P<0>.*) (?P<1>.*)".format(src, dest)
run_cmd(user.name, client, cmd, output=True, error=True)
assert_generic(client.perform, should_fail, condition)
@when(parsers.re('(?P<user>\w+) renames (?P<file1>.*) to (?P<file2>.*)'
' on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) renames (?P<file1>.*) to (?P<file2>.*)'
' on (?P<client_node>.*)'))
def rename(user, file1, file2, client_node, context):
mv_base(user, file1, file2, client_node, context)
@when(parsers.re('(?P<user>\w+) fails to rename (?P<file1>.*) to '
'(?P<file2>.*) on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) fails to rename (?P<file1>.*) to '
'(?P<file2>.*) on (?P<client_node>.*)'))
def rename_fail(user, file1, file2, client_node, context):
rename_base(user, file1, file2, client_node, context, should_fail=True)
def rename_base(user, file1, file2, client_node, context, should_fail=False):
user = context.get_user(user)
client = user.get_client(client_node)
src = client.absolute_path(file1)
dest = client.absolute_path(file2)
def condition():
osrename(client, src, dest)
assert_generic(client.perform, should_fail, condition)
def mv_base(user, file1, file2, client_node, context, should_fail=False):
user = context.get_user(user)
client = user.get_client(client_node)
src = client.absolute_path(file1)
dest = client.absolute_path(file2)
def condition():
mv(client, src, dest)
assert_generic(client.perform, should_fail, condition)
@when(parsers.re('(?P<user>\w+) deletes files (?P<files>.*) on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) deletes files (?P<files>.*) on (?P<client_node>.*)'))
def delete_file(user, files, client_node, context):
delete_file_base(user, files, client_node, context)
@when(parsers.re('(?P<user>\w+) fails to delete files (?P<files>.*) '
'on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) fails to delete files (?P<files>.*) '
'on (?P<client_node>.*)'))
def delete_file_fail(user, files, client_node, context):
delete_file_base(user, files, client_node, context, should_fail=True)
def delete_file_base(user, files, client_node, context, should_fail=False):
user = context.get_user(user)
client = user.get_client(client_node)
files = list_parser(files)
for file in files:
path = client.absolute_path(file)
def condition():
rm(client, path)
assert_generic(client.perform, should_fail, condition)
@when(parsers.re('(?P<user>\w+) changes (?P<file>.*) mode to (?P<mode>.*) on '
'(?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) changes (?P<file>.*) mode to (?P<mode>.*) on '
'(?P<client_node>.*)'))
def change_mode(user, file, mode, client_node, context):
change_mode_base(user, file, mode, client_node, context)
@when(parsers.re('(?P<user>\w+) fails to change (?P<file>.*) mode to '
'(?P<mode>.*) on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) fails to change (?P<file>.*) mode to '
'(?P<mode>.*) on (?P<client_node>.*)'))
def change_mode_fail(user, file, mode, client_node, context):
change_mode_base(user, file, mode, client_node, context, should_fail=True)
def change_mode_base(user, file, mode, client_node, context, should_fail=False):
user = context.get_user(user)
client = user.get_client(client_node)
mode = int(mode, 8)
file_path = client.absolute_path(file)
def condition():
chmod(client, mode, file_path)
assert_generic(client.perform, should_fail, condition)
@then(parsers.re('file type of (?P<user>\w+)\'s (?P<file>.*) is (?P<file_type>.*) '
'on (?P<client_node>.*)'))
def check_type(user, file, file_type, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
if file_type == "regular":
stat_method = "S_ISREG"
elif file_type == "directory":
stat_method = "S_ISDIR"
def condition():
stat_result = stat(client, file_path)
assert getattr(stat_lib, stat_method)(stat_result.st_mode)
assert_(client.perform, condition)
@then(parsers.re('(?P<user>\w+) checks using shell stat if file type '
'of (?P<file>.*) is (?P<file_type>.*) on (?P<client_node>.*)'))
def shell_check_type(user, file, file_type, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
def condition():
cmd = "stat --format=%F {}".format(file_path)
stat_file_type = run_cmd(user.name, client, cmd, output=True)
assert stat_file_type == file_type
assert_(client.perform, condition)
@when(parsers.re('mode of (?P<user>\w+)\'s (?P<file>.*) is (?P<mode>.*) on '
'(?P<client_node>.*)'))
@then(parsers.re('mode of (?P<user>\w+)\'s (?P<file>.*) is (?P<mode>.*) on '
'(?P<client_node>.*)'))
def check_mode(user, file, mode, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
mode = int(mode, 8)
def condition():
stat_result = stat(client, file_path)
assert stat_lib.S_IMODE(stat_result.st_mode) == mode
assert_(client.perform, condition)
@when(parsers.re('size of (?P<user>\w+)\'s (?P<file>.*) is (?P<size>.*) bytes '
'on (?P<client_node>.*)'))
@then(parsers.re('size of (?P<user>\w+)\'s (?P<file>.*) is (?P<size>.*) bytes '
'on (?P<client_node>.*)'))
def check_size(user, file, size, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
size = int(size)
def condition():
stat_result = stat(client, file_path)
assert stat_result.st_size == size
assert_(client.perform, condition)
@when(parsers.re('(?P<user>\w+) records (?P<files>.*) '
'stats on (?P<client_node>.*)'))
@then(parsers.re('(?P<user>\w+) records (?P<files>.*) '
'stats on (?P<client_node>.*)'))
def record_stats(user, files, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
for file_ in list_parser(files):
file_path = client.absolute_path(file_)
client.file_stats[file_path] = stat(client, file_path)
@then(parsers.re('(?P<time1>.*) time of (?P<user>\w+)\'s (?P<file>.*) is '
'(?P<comparator>.*) to (?P<time2>.*) time on (?P<client_node>.*)'))
@then(parsers.re('(?P<time1>.*) time of (?P<user>\w+)\'s (?P<file>.*) is '
'(?P<comparator>.*) than (?P<time2>.*) time on (?P<client_node>.*)'))
def check_time(user, time1, time2, comparator, file, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
attr1 = time_attr(time1)
attr2 = time_attr(time2)
file_path = client.absolute_path(file)
def condition():
stat_result = stat(client, file_path)
t1 = getattr(stat_result, attr1)
t2 = getattr(stat_result, attr2)
assert compare(t1, t2, comparator)
assert_(client.perform, condition)
@then(parsers.re('(?P<time1>.*) time of (?P<user>\w+)\'s (?P<file1>.*) is '
'(?P<comparator>.*) to recorded one of (?P<file2>.*)'))
@then(parsers.re('(?P<time1>.*) time of (?P<user>\w+)\'s (?P<file1>.*) is '
'(?P<comparator>.*) than recorded one of (?P<file2>.*)'))
def cmp_time_to_previous(user, time1, comparator, file1, file2,
client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
attr = time_attr(time1)
file_path = client.absolute_path(file1)
recorded_stats = client.file_stats[client.absolute_path(file2)]
def condition():
stat_result = stat(client, file_path)
t1 = getattr(stat_result, attr)
t2 = getattr(recorded_stats, attr)
assert compare(t1, t2, comparator)
assert_(client.perform, condition)
@then(parsers.re('(?P<user>\w+) sets extended attribute (?P<name>[.\w]+) '
                 'with value (?P<value>.*) on (?P<file>\w+) '
'on (?P<client_node>.*)'))
@when(parsers.re('(?P<user>\w+) sets extended attribute (?P<name>[.\w]+) '
                 'with value (?P<value>.*) on (?P<file>\w+) '
'on (?P<client_node>.*)'))
def set_xattr(user, file, name, value, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
def condition():
value_bytes = None
if isinstance(value, str):
value_bytes = value
elif isinstance(value, unicode):
value_bytes = value.encode('utf-8')
else:
value_bytes = str(value)
setxattr(client, file_path, name, value_bytes)
assert_(client.perform, condition)
@then(parsers.re('(?P<user>\w+) removes extended attribute (?P<name>[.\w]+) '
'from (?P<file>\w+) on (?P<client_node>.*)'))
@when(parsers.re('(?P<user>\w+) removes extended attribute (?P<name>[.\w]+) '
'from (?P<file>\w+) on (?P<client_node>.*)'))
def remove_xattr(user, file, name, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
def condition():
removexattr(client, file_path, name)
assert_(client.perform, condition)
@then(parsers.re('(?P<user>\w+) checks if (?P<file>\w+) has extended '
'attribute (?P<name>[.\w]+) on (?P<client_node>.*)'))
def check_xattr_exists(user, file, name, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
def condition():
xattrs = listxattr(client, file_path)
assert name in xattrs
assert_(client.perform, condition)
@then(parsers.re('(?P<user>\w+) checks if (?P<file>\w+) does not have extended '
'attribute (?P<name>[.\w]+) on (?P<client_node>.*)'))
def check_xattr_doesnt_exist(user, file, name, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
def condition():
xattrs = listxattr(client, file_path)
assert name not in xattrs
assert_(client.perform, condition)
@then(parsers.re('(?P<user>\w+) checks if (?P<file>\w+) has extended '
'attribute (?P<name>[.\w]+) with string value "(?P<value>.*)" '
'on (?P<client_node>.*)'))
def check_string_xattr(user, file, name, value, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
def condition():
xattr_value = getxattr(client, file_path, name)
value_utf = None
if isinstance(value, str):
value_utf = value
elif isinstance(value, unicode):
value_utf = value.encode('utf-8')
else:
value_utf = str(value)
assert xattr_value == value_utf
assert_(client.perform, condition)
@then(parsers.re('(?P<user>\w+) checks if (?P<file>\w+) has extended '
'attribute (?P<name>[.\w]+) with numeric value (?P<value>.*) '
'on (?P<client_node>.*)'))
def check_numeric_xattr(user, file, name, value, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
def condition():
xattr_value = getxattr(client, file_path, name)
assert float(xattr_value) == float(value)
assert_(client.perform, condition)
@then(parsers.re('(?P<user>\w+) checks if (?P<file>\w+) has extended '
'attribute (?P<name>[.\w]+) with JSON value "(?P<value>.*)" '
'on (?P<client_node>.*)'))
def check_json_xattr(user, file, name, value, client_node, context):
user = context.get_user(user)
client = user.get_client(client_node)
file_path = client.absolute_path(file)
def condition():
xattr_value = getxattr(client, file_path, name)
assert jsondiff.diff(json.loads(xattr_value), json.loads(value)) == {}
assert_(client.perform, condition)
################################################################################
def time_attr(parameter):
    return {
'access': 'st_atime',
'modification': 'st_mtime',
'status-change': 'st_ctime'
}[parameter]
def compare(val1, val2, comparator):
if comparator == 'equal':
return val1 == val2
elif comparator == 'not equal':
return val1 != val2
elif comparator == 'greater':
return val1 > val2
elif comparator == 'less':
return val1 < val2
elif comparator == 'not greater':
return val1 <= val2
elif comparator == 'not less':
return val1 >= val2
else:
raise ValueError("Wrong argument comparator to function compare")
| 36.441739 | 88 | 0.61215 |
f19c979551f66f87619c440dc266e8d9cef9102a
| 3,632 |
py
|
Python
|
sprokit/tests/bindings/python/modules/test-pymodules.py
|
neal-siekierski/kwiver
|
1c97ad72c8b6237cb4b9618665d042be16825005
|
[
"BSD-3-Clause"
] | null | null | null |
sprokit/tests/bindings/python/modules/test-pymodules.py
|
neal-siekierski/kwiver
|
1c97ad72c8b6237cb4b9618665d042be16825005
|
[
"BSD-3-Clause"
] | null | null | null |
sprokit/tests/bindings/python/modules/test-pymodules.py
|
neal-siekierski/kwiver
|
1c97ad72c8b6237cb4b9618665d042be16825005
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
#ckwg +28
# Copyright 2012-2013 by Kitware, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither name of Kitware, Inc. nor the names of any contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
def test_import():
try:
import vital.modules.modules
except:
test_error("Failed to import the modules module")
def test_load():
from vital.modules import modules
from sprokit.pipeline import process_factory
modules.load_known_modules()
types = process_factory.types()
if 'test_python_process' not in types:
test_error("Failed to load Python processes")
# TEST_PROPERTY(ENVIRONMENT, SPROKIT_NO_PYTHON_MODULES=)
def test_masking():
from vital.modules import modules
from sprokit.pipeline import process_factory
modules.load_known_modules()
types = process_factory.types()
if 'test_python_process' in types:
test_error("Failed to mask out Python processes")
# TEST_PROPERTY(ENVIRONMENT, SPROKIT_PYTHON_MODULES=sprokit.test.python.modules)
def test_extra_modules():
from vital.modules import modules
from sprokit.pipeline import process_factory
modules.load_known_modules()
types = process_factory.types()
if 'extra_test_python_process' not in types:
test_error("Failed to load extra Python processes")
# TEST_PROPERTY(ENVIRONMENT, PYTHONPATH=@CMAKE_CURRENT_SOURCE_DIR@)
def test_pythonpath():
from vital.modules import modules
from sprokit.pipeline import process_factory
from sprokit.pipeline import scheduler_factory
modules.load_known_modules()
types = process_factory.types()
if 'pythonpath_test_process' not in types:
test_error("Failed to load extra Python processes accessible from PYTHONPATH")
types = scheduler_factory.types()
if 'pythonpath_test_scheduler' not in types:
test_error("Failed to load extra Python schedulers accessible from PYTHONPATH")
if __name__ == '__main__':
import os
import sys
if not len(sys.argv) == 4:
test_error("Expected three arguments")
sys.exit(1)
testname = sys.argv[1]
os.chdir(sys.argv[2])
sys.path.append(sys.argv[3])
from sprokit.test.test import *
run_test(testname, find_tests(locals()))
| 32.141593 | 87 | 0.746421 |
b042aeab3d0561e24508dc8e20978d6f21b6f58b
| 10,047 |
py
|
Python
|
tests/beem/test_amount.py
|
abitmore/beem
|
2026833a836007e45f16395a9ca3b31d02e98f87
|
[
"MIT"
] | 118 |
2018-03-06T07:26:19.000Z
|
2022-03-21T20:16:04.000Z
|
tests/beem/test_amount.py
|
abitmore/beem
|
2026833a836007e45f16395a9ca3b31d02e98f87
|
[
"MIT"
] | 248 |
2018-03-20T18:03:39.000Z
|
2022-03-28T16:38:09.000Z
|
tests/beem/test_amount.py
|
abitmore/beem
|
2026833a836007e45f16395a9ca3b31d02e98f87
|
[
"MIT"
] | 81 |
2018-04-27T15:27:52.000Z
|
2021-10-31T06:14:25.000Z
|
# -*- coding: utf-8 -*-
import unittest
from parameterized import parameterized
from beem import Steem
from beem.amount import Amount
from beem.asset import Asset
from beem.instance import set_shared_blockchain_instance, SharedInstance
from decimal import Decimal
from .nodes import get_hive_nodes, get_steem_nodes
class Testcases(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.bts = Steem(
node=get_hive_nodes(),
nobroadcast=True,
num_retries=10
)
set_shared_blockchain_instance(cls.bts)
cls.asset = Asset("HBD")
cls.symbol = cls.asset["symbol"]
cls.precision = cls.asset["precision"]
cls.asset2 = Asset("HIVE")
def dotest(self, ret, amount, symbol):
self.assertEqual(float(ret), float(amount))
self.assertEqual(ret["symbol"], symbol)
self.assertIsInstance(ret["asset"], dict)
self.assertIsInstance(ret["amount"], Decimal)
def test_init(self):
stm = self.bts
# String init
asset = Asset("HBD", blockchain_instance=stm)
symbol = asset["symbol"]
precision = asset["precision"]
amount = Amount("1 {}".format(symbol), blockchain_instance=stm)
self.dotest(amount, 1, symbol)
# Amount init
amount = Amount(amount, blockchain_instance=stm)
self.dotest(amount, 1, symbol)
# blockchain dict init
amount = Amount({
"amount": 1 * 10 ** precision,
"asset_id": asset["id"]
}, blockchain_instance=stm)
self.dotest(amount, 1, symbol)
# API dict init
amount = Amount({
"amount": 1.3 * 10 ** precision,
"asset": asset["id"]
}, blockchain_instance=stm)
self.dotest(amount, 1.3, symbol)
# Asset as symbol
amount = Amount(1.3, Asset("HBD"), blockchain_instance=stm)
self.dotest(amount, 1.3, symbol)
# Asset as symbol
amount = Amount(1.3, symbol, blockchain_instance=stm)
self.dotest(amount, 1.3, symbol)
# keyword inits
amount = Amount(amount=1.3, asset=Asset("HBD", blockchain_instance=stm), blockchain_instance=stm)
self.dotest(amount, 1.3, symbol)
amount = Amount(amount=1.3001, asset=Asset("HBD", blockchain_instance=stm), blockchain_instance=stm)
self.dotest(amount, 1.3001, symbol)
amount = Amount(amount=1.3001, asset=Asset("HBD", blockchain_instance=stm), fixed_point_arithmetic=True, blockchain_instance=stm)
self.dotest(amount, 1.3, symbol)
# keyword inits
amount = Amount(amount=1.3, asset=dict(Asset("HBD", blockchain_instance=stm)), blockchain_instance=stm)
self.dotest(amount, 1.3, symbol)
# keyword inits
amount = Amount(amount=1.3, asset=symbol, blockchain_instance=stm)
self.dotest(amount, 1.3, symbol)
amount = Amount(amount=8.190, asset=symbol, blockchain_instance=stm)
self.dotest(amount, 8.190, symbol)
def test_copy(self):
amount = Amount("1", self.symbol)
self.dotest(amount.copy(), 1, self.symbol)
def test_properties(self):
amount = Amount("1", self.symbol)
self.assertEqual(amount.amount, 1.0)
self.assertEqual(amount.symbol, self.symbol)
self.assertIsInstance(amount.asset, Asset)
self.assertEqual(amount.asset["symbol"], self.symbol)
def test_tuple(self):
amount = Amount("1", self.symbol)
self.assertEqual(
amount.tuple(),
(1.0, self.symbol))
def test_json_appbase(self):
asset = Asset("HBD", blockchain_instance=self.bts)
amount = Amount("1", asset, new_appbase_format=False, blockchain_instance=self.bts)
if self.bts.rpc.get_use_appbase():
self.assertEqual(
amount.json(),
[str(1 * 10 ** asset.precision), asset.precision, asset.asset])
else:
self.assertEqual(amount.json(), "1.000 HBD")
def test_json_appbase2(self):
asset = Asset("HBD", blockchain_instance=self.bts)
amount = Amount("1", asset, new_appbase_format=True, blockchain_instance=self.bts)
if self.bts.rpc.get_use_appbase():
self.assertEqual(
amount.json(),
{'amount': str(1 * 10 ** asset.precision), 'nai': asset.asset, 'precision': asset.precision})
else:
self.assertEqual(amount.json(), "1.000 HBD")
def test_string(self):
self.assertEqual(
str(Amount("10000", self.symbol)),
"10000.000 {}".format(self.symbol))
def test_int(self):
self.assertEqual(
int(Amount("0.9999", self.symbol)),
999)
self.assertEqual(
int(Amount(0.151, self.symbol)),
151)
self.assertEqual(
int(Amount(8.190, self.symbol)),
8190)
self.assertEqual(
int(Amount(round(0.1509,3), self.symbol)),
151)
self.assertEqual(
int(Amount(round(0.1509,3), self.asset)),
151)
self.assertEqual(
int(Amount(int(1), self.symbol)),
1000)
self.assertEqual(
int(Amount(amount=round(0.1509,3), asset=Asset("HBD"))),
151)
def test_dict(self):
self.assertEqual(int(Amount({'amount': '150', 'nai': '@@000000021', 'precision': 3})), 150)
def test_float(self):
self.assertEqual(
float(Amount("1", self.symbol)),
1.00000)
self.assertEqual(
float(Amount(0.151, self.symbol)),
0.151)
self.assertEqual(
float(Amount(round(0.1509, 3), self.symbol)),
0.151)
self.assertEqual(
float(Amount(8.190, self.symbol)),
8.190)
def test_plus(self):
a1 = Amount(1, self.symbol)
a2 = Amount(2, self.symbol)
self.dotest(a1 + a2, 3, self.symbol)
self.dotest(a1 + 2, 3, self.symbol)
with self.assertRaises(Exception):
a1 + Amount(1, asset=self.asset2)
# inline
a2 = Amount(2, self.symbol)
a2 += a1
self.dotest(a2, 3, self.symbol)
a2 += 5
self.dotest(a2, 8, self.symbol)
a2 += Decimal(2)
self.dotest(a2, 10, self.symbol)
with self.assertRaises(Exception):
a1 += Amount(1, asset=self.asset2)
def test_minus(self):
a1 = Amount(1, self.symbol)
a2 = Amount(2, self.symbol)
self.dotest(a1 - a2, -1, self.symbol)
self.dotest(a1 - 5, -4, self.symbol)
with self.assertRaises(Exception):
a1 - Amount(1, asset=self.asset2)
# inline
a2 = Amount(2, self.symbol)
a2 -= a1
self.dotest(a2, 1, self.symbol)
a2 -= 1
self.dotest(a2, 0, self.symbol)
self.dotest(a2 - 2, -2, self.symbol)
with self.assertRaises(Exception):
a1 -= Amount(1, asset=self.asset2)
def test_mul(self):
a1 = Amount(5, self.symbol)
a2 = Amount(2, self.symbol)
self.dotest(a1 * a2, 10, self.symbol)
self.dotest(a1 * 3, 15, self.symbol)
with self.assertRaises(Exception):
a1 * Amount(1, asset=self.asset2)
# inline
a2 = Amount(2, self.symbol)
a2 *= 5
self.dotest(a2, 10, self.symbol)
a2 = Amount(2, self.symbol)
a2 *= a1
self.dotest(a2, 10, self.symbol)
with self.assertRaises(Exception):
a1 *= Amount(2, asset=self.asset2)
def test_div(self):
a1 = Amount(15, self.symbol)
self.dotest(a1 / 3, 5, self.symbol)
self.dotest(a1 // 2, 7, self.symbol)
with self.assertRaises(Exception):
a1 / Amount(1, asset=self.asset2)
# inline
a2 = a1.copy()
a2 /= 3
self.dotest(a2, 5, self.symbol)
a2 = a1.copy()
a2 //= 2
self.dotest(a2, 7, self.symbol)
with self.assertRaises(Exception):
a1 *= Amount(2, asset=self.asset2)
def test_mod(self):
a1 = Amount(15, self.symbol)
a2 = Amount(3, self.symbol)
self.dotest(a1 % 3, 0, self.symbol)
self.dotest(a1 % a2, 0, self.symbol)
self.dotest(a1 % 2, 1, self.symbol)
with self.assertRaises(Exception):
a1 % Amount(1, asset=self.asset2)
# inline
a2 = a1.copy()
a2 %= 3
self.dotest(a2, 0, self.symbol)
with self.assertRaises(Exception):
a1 %= Amount(2, asset=self.asset2)
def test_pow(self):
a1 = Amount(15, self.symbol)
a2 = Amount(3, self.symbol)
self.dotest(a1 ** 3, 15 ** 3, self.symbol)
self.dotest(a1 ** a2, 15 ** 3, self.symbol)
self.dotest(a1 ** 2, 15 ** 2, self.symbol)
with self.assertRaises(Exception):
a1 ** Amount(1, asset=self.asset2)
# inline
a2 = a1.copy()
a2 **= 3
self.dotest(a2, 15 ** 3, self.symbol)
with self.assertRaises(Exception):
a1 **= Amount(2, asset=self.asset2)
def test_ltge(self):
a1 = Amount(1, self.symbol)
a2 = Amount(2, self.symbol)
self.assertTrue(a1 < a2)
self.assertTrue(a2 > a1)
self.assertTrue(a2 > 1)
self.assertTrue(a1 < 5)
def test_leeq(self):
a1 = Amount(1, self.symbol)
a2 = Amount(1, self.symbol)
self.assertTrue(a1 <= a2)
self.assertTrue(a1 >= a2)
self.assertTrue(a1 <= 1)
self.assertTrue(a1 >= 1)
self.assertTrue(a1 == 1.0001)
def test_ne(self):
a1 = Amount(1, self.symbol)
a2 = Amount(2, self.symbol)
self.assertTrue(a1 != a2)
self.assertTrue(a1 != 5)
a1 = Amount(1, self.symbol)
a2 = Amount(1, self.symbol)
self.assertTrue(a1 == a2)
self.assertTrue(a1 == 1)
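# --- Illustrative sketch (added, not part of the original test module) ---
# Rough shape of the Amount API exercised above, outside of unittest. The node
# list comes from get_hive_nodes() as in setUpClass, so a reachable Hive node
# is required; treat this as a sketch rather than something the suite runs.
def _example_amount_usage():
    stm = Steem(node=get_hive_nodes(), nobroadcast=True)
    price = Amount("1.500 HBD", blockchain_instance=stm)
    doubled = price * 2                      # arithmetic keeps the asset
    return float(doubled), doubled.symbol    # (3.0, 'HBD')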
| 34.057627 | 137 | 0.564646 |
16f9893add6e55b3ebbdce92c19e9a3b51fb5287
| 799 |
py
|
Python
|
0028-implement-strstr/solution.py
|
radelman/leetcode
|
379aede2b84050a9452bea0452c4ffc8a156b9de
|
[
"BSD-2-Clause"
] | null | null | null |
0028-implement-strstr/solution.py
|
radelman/leetcode
|
379aede2b84050a9452bea0452c4ffc8a156b9de
|
[
"BSD-2-Clause"
] | null | null | null |
0028-implement-strstr/solution.py
|
radelman/leetcode
|
379aede2b84050a9452bea0452c4ffc8a156b9de
|
[
"BSD-2-Clause"
] | null | null | null |
class Solution:
def strStr(self, haystack: str, needle: str) -> int:
if len(needle) == 0:
return 0
cumsum = [ord(c) for c in haystack]
for i in range(1, len(cumsum)):
cumsum[i] = cumsum[i - 1] + cumsum[i]
target = sum([ord(c) for c in needle])
for i in range(len(haystack) - len(needle) + 1):
first = cumsum[i - 1] if i > 0 else 0
last = cumsum[i + len(needle) - 1]
attempt = last - first
if attempt == target:
if haystack[i : i + len(needle)] == needle:
return i
return -1
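# Added commentary (not part of the original solution): strStr above keeps a
# prefix sum of character codes so the code sum of each window of len(needle)
# characters can be computed in O(1); only windows whose sum matches the
# needle's sum are compared character by character. The worst case is still
# O(n*m), but the cheap filter skips most non-matching offsets in practice.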
def main() -> None:
test_cases = [
["hello", "ll"],
["aaaaa", "bba"]
]
    solution = Solution()
for inputs in test_cases:
haystack, needle = inputs
test = solution.strStr(haystack, needle)
print(test)
if __name__ == '__main__':
main()
| 19.487805 | 53 | 0.579474 |
4bf667f64bf55711ee869cf9acac3a8e4ffe42cf
| 6,663 |
py
|
Python
|
tools/tiny-test-fw/IDF/IDFApp.py
|
ulfalizer/esp-idf-1
|
6835bfc741bf15e98fb7971293913f770df6081f
|
[
"Apache-2.0"
] | 14 |
2018-04-23T20:34:38.000Z
|
2022-02-03T05:06:57.000Z
|
lib/third_party/mcu_vendor/espressif/esp-idf/tools/tiny-test-fw/IDF/IDFApp.py
|
dyg540/amazon-freertos
|
3d61ed00f018ac6ec0df2031556dbb71bf03617d
|
[
"MIT"
] | 19 |
2018-12-07T03:41:15.000Z
|
2020-02-05T14:42:04.000Z
|
lib/third_party/mcu_vendor/espressif/esp-idf/tools/tiny-test-fw/IDF/IDFApp.py
|
dyg540/amazon-freertos
|
3d61ed00f018ac6ec0df2031556dbb71bf03617d
|
[
"MIT"
] | 11 |
2018-08-03T10:15:33.000Z
|
2020-12-07T03:26:10.000Z
|
# Copyright 2015-2017 Espressif Systems (Shanghai) PTE LTD
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" IDF Test Applications """
import subprocess
import os
import App
class IDFApp(App.BaseApp):
"""
Implements common esp-idf application behavior.
    IDF applications should inherit from this class and override the method get_binary_path.
"""
IDF_DOWNLOAD_CONFIG_FILE = "download.config"
def __init__(self, app_path):
super(IDFApp, self).__init__(app_path)
self.idf_path = self.get_sdk_path()
self.binary_path = self.get_binary_path(app_path)
assert os.path.exists(self.binary_path)
assert self.IDF_DOWNLOAD_CONFIG_FILE in os.listdir(self.binary_path)
self.esptool, self.partition_tool = self.get_tools()
@classmethod
def get_sdk_path(cls):
idf_path = os.getenv("IDF_PATH")
assert idf_path
assert os.path.exists(idf_path)
return idf_path
@classmethod
def get_tools(cls):
idf_path = cls.get_sdk_path()
# get esptool and partition tool for esp-idf
esptool = os.path.join(idf_path, "components",
"esptool_py", "esptool", "esptool.py")
partition_tool = os.path.join(idf_path, "components",
"partition_table", "gen_esp32part.py")
assert os.path.exists(esptool) and os.path.exists(partition_tool)
return esptool, partition_tool
def get_binary_path(self, app_path):
"""
get binary path according to input app_path.
subclass must overwrite this method.
:param app_path: path of application
:return: abs app binary path
"""
pass
def process_arg(self, arg):
"""
process args in download.config. convert to abs path for .bin args. strip spaces and CRLFs.
"""
if ".bin" in arg:
ret = os.path.join(self.binary_path, arg)
else:
ret = arg
return ret.strip("\r\n ")
def process_app_info(self):
"""
get app download config and partition info from a specific app path
:return: download config, partition info
"""
with open(os.path.join(self.binary_path, self.IDF_DOWNLOAD_CONFIG_FILE), "r") as f:
configs = f.read().split(" ")
download_configs = ["--chip", "auto", "--before", "default_reset",
"--after", "hard_reset", "write_flash", "-z"]
download_configs += [self.process_arg(x) for x in configs]
# handle partition table
for partition_file in download_configs:
if "partition" in partition_file:
partition_file = os.path.join(self.binary_path, partition_file)
break
else:
raise ValueError("No partition table found for IDF binary path: {}".format(self.binary_path))
process = subprocess.Popen(["python", self.partition_tool, partition_file],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
raw_data = process.stdout.read()
if isinstance(raw_data, bytes):
raw_data = raw_data.decode()
partition_table = dict()
for line in raw_data.splitlines():
if line[0] != "#":
try:
_name, _type, _subtype, _offset, _size, _flags = line.split(",")
if _size[-1] == "K":
_size = int(_size[:-1]) * 1024
elif _size[-1] == "M":
_size = int(_size[:-1]) * 1024 * 1024
else:
_size = int(_size)
except ValueError:
continue
partition_table[_name] = {
"type": _type,
"subtype": _subtype,
"offset": _offset,
"size": _size,
"flags": _flags
}
return download_configs, partition_table
class Example(IDFApp):
def get_binary_path(self, app_path):
# build folder of example path
path = os.path.join(self.idf_path, app_path, "build")
if not os.path.exists(path):
# search for CI build folders
app = os.path.basename(app_path)
example_path = os.path.join(self.idf_path, "build_examples", "example_builds")
for dirpath, dirnames, files in os.walk(example_path):
if dirnames:
if dirnames[0] == app:
path = os.path.join(example_path, dirpath, dirnames[0], "build")
break
else:
raise OSError("Failed to find example binary")
return path
class UT(IDFApp):
def get_binary_path(self, app_path):
"""
:param app_path: app path or app config
:return: binary path
"""
if not app_path:
app_path = "default"
path = os.path.join(self.idf_path, app_path)
if not os.path.exists(path):
while True:
# try to get by config
if app_path == "default":
# it's default config, we first try to get form build folder of unit-test-app
path = os.path.join(self.idf_path, "tools", "unit-test-app", "build")
if os.path.exists(path):
# found, use bin in build path
break
# ``make ut-build-all-configs`` or ``make ut-build-CONFIG`` will copy binary to output folder
path = os.path.join(self.idf_path, "tools", "unit-test-app", "output", app_path)
if os.path.exists(path):
break
raise OSError("Failed to get unit-test-app binary path")
return path
class SSC(IDFApp):
def get_binary_path(self, app_path):
# TODO: to implement SSC get binary path
return app_path
class AT(IDFApp):
def get_binary_path(self, app_path):
# TODO: to implement AT get binary path
return app_path
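# --- Illustrative sketch (added, not part of the original module) ---
# Typical flow described by the docstrings above: wrap an app, then read its
# esptool download arguments and partition table. The example path is a
# placeholder and IDF_PATH must point at an esp-idf checkout for this to work.
def _example_app_info(app_path="examples/get-started/hello_world"):
    app = Example(app_path)                      # resolves the build folder
    download_configs, partition_table = app.process_app_info()
    return download_configs, partition_table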
| 36.60989 | 109 | 0.575867 |
0498ab15cdc44c9270e1df2952701af2ce2e520a
| 1,593 |
py
|
Python
|
detailsScrape/senmoistd/senmoistd26.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | null | null | null |
detailsScrape/senmoistd/senmoistd26.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | 2 |
2021-02-03T01:55:13.000Z
|
2021-04-30T12:46:33.000Z
|
detailsScrape/senmoistd/senmoistd26.py
|
Asyikin98/SkinFerm
|
72fd1ad6339c96adf5ec154bde566de9eb1472c3
|
[
"MIT"
] | null | null | null |
import urllib.request
import random
from bs4 import BeautifulSoup
from requests import get
import mysql.connector
conn = mysql.connector.connect(user="root", passwd="",host="localhost", database="product")
cursor = conn.cursor()
sql = """INSERT INTO senmoistd (about, rate, top, comment, dari) VALUES (%s, %s, %s, %s, %s)"""
def crawl_url(pageUrl, moistsend_arr):
url = 'https://www.ulta.com/daily-advance-lotion?productId=xlsImpprod12041661'
page = get(url)
soup = BeautifulSoup(page.text, 'html.parser')
type(soup)
#######################################################for product 1############################################################################
moist = soup.find_all('div', class_='ProductDetail__productImage ProductDetail__productImage--withoutSwatches')
try:
for moistd in moist :
about = soup.find("div",{"class":"ProductMainSection"}).get_text().strip()
rate = soup.find("div",{"class":"ProductDetail__productContent"}).get_text().strip()
top = soup.find("p",{"class":"MixedMenuButton__Text MixedMenuButton__Text--label"}).get_text().strip()
comment = soup.find("div",{"class":"Collapsible__contentInner"}).get_text().strip()
dari = soup.find("div",{"class":"ProductDetail__ingredients"}).get_text().strip()
moistsend_arr.append((about, rate, top, comment, dari))
    finally:
        # NOTE: returning from ``finally`` suppresses any exception raised while
        # parsing the page above, so whatever rows were collected are still returned.
        return moistsend_arr
moistsend_arr = crawl_url("", [])
print(len(moistsend_arr))
cursor.executemany(sql, moistsend_arr)
conn.commit()
cursor.close()
conn.close()
| 36.204545 | 148 | 0.624608 |
49a626ea1ecb299d07ce918b79729b1630fc007a
| 29,432 |
py
|
Python
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/_service_endpoint_policies_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | 8 |
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/_service_endpoint_policies_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
sdk/network/azure-mgmt-network/azure/mgmt/network/v2018_08_01/operations/_service_endpoint_policies_operations.py
|
vbarbaresi/azure-sdk-for-python
|
397ba46c51d001ff89c66b170f5576cf8f49c05f
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import TYPE_CHECKING
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.paging import ItemPaged
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import HttpRequest, HttpResponse
from azure.core.polling import LROPoller, NoPolling, PollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.arm_polling import ARMPolling
from .. import models
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]]
class ServiceEndpointPoliciesOperations(object):
"""ServiceEndpointPoliciesOperations operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2018_08_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer):
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
def _delete_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> None
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_delete(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
**kwargs # type: Any
):
# type: (...) -> LROPoller[None]
"""Deletes the specified service endpoint policy.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._delete_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
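    # Added commentary (not part of the generated client): every begin_* method
    # in this class returns an LROPoller. A typical pattern is poller.result()
    # to block until the service finishes, or poller.continuation_token() to
    # persist the operation and resume it later by passing continuation_token=...
    # back into the same begin_* method, as the docstrings above describe.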
def get(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
expand=None, # type: Optional[str]
**kwargs # type: Any
):
# type: (...) -> "models.ServiceEndpointPolicy"
"""Gets the specified service Endpoint Policies in a specified resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: ServiceEndpointPolicy, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2018_08_01.models.ServiceEndpointPolicy
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def _create_or_update_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "models.ServiceEndpointPolicy"
**kwargs # type: Any
):
# type: (...) -> "models.ServiceEndpointPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'ServiceEndpointPolicy')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_create_or_update(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "models.ServiceEndpointPolicy"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ServiceEndpointPolicy"]
"""Creates or updates a service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to the create or update service endpoint policy
operation.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.ServiceEndpointPolicy
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._create_or_update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def _update_initial(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> "models.ServiceEndpointPolicy"
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceEndpointPolicy"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceEndpointPolicyName': self._serialize.url("service_endpoint_policy_name", service_endpoint_policy_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'TagsObject')
body_content_kwargs['content'] = body_content
request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def begin_update(
self,
resource_group_name, # type: str
service_endpoint_policy_name, # type: str
parameters, # type: "models.TagsObject"
**kwargs # type: Any
):
# type: (...) -> LROPoller["models.ServiceEndpointPolicy"]
"""Updates service Endpoint Policies.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_endpoint_policy_name: The name of the service endpoint policy.
:type service_endpoint_policy_name: str
:param parameters: Parameters supplied to update service endpoint policy tags.
:type parameters: ~azure.mgmt.network.v2018_08_01.models.TagsObject
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.PollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of LROPoller that returns either ServiceEndpointPolicy or the result of cls(response)
:rtype: ~azure.core.polling.LROPoller[~azure.mgmt.network.v2018_08_01.models.ServiceEndpointPolicy]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, PollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceEndpointPolicy"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = self._update_initial(
resource_group_name=resource_group_name,
service_endpoint_policy_name=service_endpoint_policy_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicy', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = ARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = NoPolling()
else: polling_method = polling
if cont_token:
return LROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies/{serviceEndpointPolicyName}'} # type: ignore
def list(
self,
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ServiceEndpointPolicyListResult"]
"""Gets all the service endpoint policies in a subscription.
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/ServiceEndpointPolicies'} # type: ignore
def list_by_resource_group(
self,
resource_group_name, # type: str
**kwargs # type: Any
):
# type: (...) -> Iterable["models.ServiceEndpointPolicyListResult"]
"""Gets all service endpoint Policies in a resource group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either ServiceEndpointPolicyListResult or the result of cls(response)
:rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.network.v2018_08_01.models.ServiceEndpointPolicyListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.ServiceEndpointPolicyListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2018-08-01"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
def extract_data(pipeline_response):
deserialized = self._deserialize('ServiceEndpointPolicyListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, iter(list_of_elem)
def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return ItemPaged(
get_next, extract_data
)
list_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/serviceEndpointPolicies'} # type: ignore
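# --- Illustrative sketch (added, not part of the generated code) ---
# The operations class above is normally reached through a management client
# rather than instantiated directly. The credential, subscription id, and
# resource group below are placeholders, and the NetworkManagementClient import
# assumes the azure-mgmt-network package layout that ships this module.
def _example_list_policies(credential, subscription_id, resource_group):
    from azure.mgmt.network.v2018_08_01 import NetworkManagementClient
    client = NetworkManagementClient(credential, subscription_id)
    return [
        policy.name
        for policy in client.service_endpoint_policies.list_by_resource_group(resource_group)
    ]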
| 49.217391 | 214 | 0.667165 |
373e5b8337b3d95d6a3b3e235086c1678b41e361
| 7,875 |
py
|
Python
|
custom_components/acthor/acthor/pymodbus_vendor/pdu.py
|
jatty/hass-acthor
|
9d5aaed3f01e9288fef031b47b0808e6e80c22d3
|
[
"MIT"
] | null | null | null |
custom_components/acthor/acthor/pymodbus_vendor/pdu.py
|
jatty/hass-acthor
|
9d5aaed3f01e9288fef031b47b0808e6e80c22d3
|
[
"MIT"
] | null | null | null |
custom_components/acthor/acthor/pymodbus_vendor/pdu.py
|
jatty/hass-acthor
|
9d5aaed3f01e9288fef031b47b0808e6e80c22d3
|
[
"MIT"
] | null | null | null |
"""
Contains base classes for modbus request/response/error packets
"""
# --------------------------------------------------------------------------- #
# Logging
# --------------------------------------------------------------------------- #
import logging
from .compat import byte2int, int2byte, iteritems
from .constants import Defaults
from .exceptions import NotImplementedException
from .interfaces import Singleton
from .utilities import rtuFrameSize
_logger = logging.getLogger(__name__)
# --------------------------------------------------------------------------- #
# Base PDU's
# --------------------------------------------------------------------------- #
class ModbusPDU(object):
"""
Base class for all Modbus messages
.. attribute:: transaction_id
This value is used to uniquely identify a request
response pair. It can be implemented as a simple counter
.. attribute:: protocol_id
This is a constant set at 0 to indicate Modbus. It is
put here for ease of expansion.
.. attribute:: unit_id
This is used to route the request to the correct child. In
the TCP modbus, it is used for routing (or not used at all. However,
for the serial versions, it is used to specify which child to perform
the requests against. The value 0x00 represents the broadcast address
(also 0xff).
.. attribute:: check
This is used for LRC/CRC in the serial modbus protocols
.. attribute:: skip_encode
This is used when the message payload has already been encoded.
Generally this will occur when the PayloadBuilder is being used
to create a complicated message. By setting this to True, the
request will pass the currently encoded message through instead
of encoding it again.
"""
def __init__(self, **kwargs):
""" Initializes the base data for a modbus request """
self.transaction_id = kwargs.get('transaction', Defaults.TransactionId)
self.protocol_id = kwargs.get('protocol', Defaults.ProtocolId)
self.unit_id = kwargs.get('unit', Defaults.UnitId)
self.skip_encode = kwargs.get('skip_encode', False)
self.check = 0x0000
def encode(self):
""" Encodes the message
:raises: A not implemented exception
"""
raise NotImplementedException()
def decode(self, data):
""" Decodes data part of the message.
:param data: is a string object
:raises: A not implemented exception
"""
raise NotImplementedException()
@classmethod
def calculateRtuFrameSize(cls, buffer):
""" Calculates the size of a PDU.
:param buffer: A buffer containing the data that have been received.
:returns: The number of bytes in the PDU.
"""
if hasattr(cls, '_rtu_frame_size'):
return cls._rtu_frame_size
elif hasattr(cls, '_rtu_byte_count_pos'):
return rtuFrameSize(buffer, cls._rtu_byte_count_pos)
else:
raise NotImplementedException(
"Cannot determine RTU frame size for %s" % cls.__name__)
class ModbusRequest(ModbusPDU):
""" Base class for a modbus request PDU """
def __init__(self, **kwargs):
""" Proxy to the lower level initializer """
ModbusPDU.__init__(self, **kwargs)
def doException(self, exception):
""" Builds an error response based on the function
:param exception: The exception to return
:raises: An exception response
"""
exc = ExceptionResponse(self.function_code, exception)
_logger.error(exc)
return exc
class ModbusResponse(ModbusPDU):
""" Base class for a modbus response PDU
.. attribute:: should_respond
A flag that indicates if this response returns a result back
to the client issuing the request
.. attribute:: _rtu_frame_size
Indicates the size of the modbus rtu response used for
calculating how much to read.
"""
should_respond = True
def __init__(self, **kwargs):
""" Proxy to the lower level initializer """
ModbusPDU.__init__(self, **kwargs)
def isError(self):
"""Checks if the error is a success or failure"""
return self.function_code > 0x80
# --------------------------------------------------------------------------- #
# Exception PDU's
# --------------------------------------------------------------------------- #
class ModbusExceptions(Singleton):
"""
An enumeration of the valid modbus exceptions
"""
IllegalFunction = 0x01
IllegalAddress = 0x02
IllegalValue = 0x03
SlaveFailure = 0x04
Acknowledge = 0x05
SlaveBusy = 0x06
MemoryParityError = 0x08
GatewayPathUnavailable = 0x0A
GatewayNoResponse = 0x0B
@classmethod
def decode(cls, code):
""" Given an error code, translate it to a
string error name.
:param code: The code number to translate
"""
values = dict((v, k) for k, v in iteritems(cls.__dict__)
if not k.startswith('__') and not callable(v))
return values.get(code, None)
class ExceptionResponse(ModbusResponse):
""" Base class for a modbus exception PDU """
ExceptionOffset = 0x80
_rtu_frame_size = 5
def __init__(self, function_code, exception_code=None, **kwargs):
""" Initializes the modbus exception response
:param function_code: The function to build an exception response for
:param exception_code: The specific modbus exception to return
"""
ModbusResponse.__init__(self, **kwargs)
self.original_code = function_code
self.function_code = function_code | self.ExceptionOffset
self.exception_code = exception_code
def encode(self):
""" Encodes a modbus exception response
:returns: The encoded exception packet
"""
return int2byte(self.exception_code)
def decode(self, data):
""" Decodes a modbus exception response
:param data: The packet data to decode
"""
self.exception_code = byte2int(data[0])
def __str__(self):
""" Builds a representation of an exception response
:returns: The string representation of an exception response
"""
message = ModbusExceptions.decode(self.exception_code)
parameters = (self.function_code, self.original_code, message)
return "Exception Response(%d, %d, %s)" % parameters
class IllegalFunctionRequest(ModbusRequest):
"""
Defines the Modbus slave exception type 'Illegal Function'
This exception code is returned if the slave::
- does not implement the function code **or**
- is not in a state that allows it to process the function
"""
ErrorCode = 1
def __init__(self, function_code, **kwargs):
""" Initializes a IllegalFunctionRequest
:param function_code: The function we are erroring on
"""
ModbusRequest.__init__(self, **kwargs)
self.function_code = function_code
def decode(self, data):
""" This is here so this failure will run correctly
:param data: Not used
"""
pass
def execute(self, context):
""" Builds an illegal function request error response
:param context: The current context for the message
:returns: The error response packet
"""
return ExceptionResponse(self.function_code, self.ErrorCode)
# --------------------------------------------------------------------------- #
# Exported symbols
# --------------------------------------------------------------------------- #
__all__ = [
'ModbusRequest', 'ModbusResponse', 'ModbusExceptions',
'ExceptionResponse', 'IllegalFunctionRequest',
]
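# --------------------------------------------------------------------------- #
# Usage sketch
# --------------------------------------------------------------------------- #
# A minimal, illustrative sketch exercising only the classes defined above;
# the function code 0x01 is an arbitrary example value. Run the module
# directly to try it.
if __name__ == "__main__":
    response = ExceptionResponse(0x01, ModbusExceptions.IllegalAddress)
    # the exception response carries the original code with the offset applied
    assert response.function_code == 0x81
    assert response.isError()
    # the enumeration can translate raw exception codes back into names
    assert ModbusExceptions.decode(0x02) == 'IllegalAddress'
    print(response)  # Exception Response(129, 1, IllegalAddress)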
| 31.5 | 79 | 0.605079 |
281963f3f5c1860a91dd55b88a65955859bf51bc
| 42,425 |
py
|
Python
|
django/db/models/fields/__init__.py
|
t11e/django
|
447f5375d378dba3bac1ded0306fa0d1b8ab55a4
|
[
"BSD-3-Clause"
] | 1 |
2016-05-08T13:32:33.000Z
|
2016-05-08T13:32:33.000Z
|
django/db/models/fields/__init__.py
|
t11e/django
|
447f5375d378dba3bac1ded0306fa0d1b8ab55a4
|
[
"BSD-3-Clause"
] | null | null | null |
django/db/models/fields/__init__.py
|
t11e/django
|
447f5375d378dba3bac1ded0306fa0d1b8ab55a4
|
[
"BSD-3-Clause"
] | null | null | null |
import datetime
import decimal
import re
import time
import math
import django.utils.copycompat as copy
from django.db import connection
from django.db.models.fields.subclassing import LegacyConnection
from django.db.models.query_utils import QueryWrapper
from django.conf import settings
from django import forms
from django.core import exceptions, validators
from django.utils.datastructures import DictWrapper
from django.utils.functional import curry
from django.utils.itercompat import tee
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy as _
from django.utils.encoding import smart_unicode, force_unicode, smart_str
from django.utils import datetime_safe
class NOT_PROVIDED:
pass
# The values to use for "blank" in SelectFields. Will be appended to the start of most "choices" lists.
BLANK_CHOICE_DASH = [("", "---------")]
BLANK_CHOICE_NONE = [("", "None")]
class FieldDoesNotExist(Exception):
pass
# A guide to Field parameters:
#
# * name: The name of the field specified in the model.
# * attname: The attribute to use on the model object. This is the same as
# "name", except in the case of ForeignKeys, where "_id" is
# appended.
# * db_column: The db_column specified in the model (or None).
# * column: The database column for this field. This is the same as
# "attname", except if db_column is specified.
#
# Code that introspects values, or does other dynamic things, should use
# attname. For example, this gets the primary key value of object "obj":
#
# getattr(obj, opts.pk.attname)
class Field(object):
"""Base class for all field types"""
__metaclass__ = LegacyConnection
# Designates whether empty strings fundamentally are allowed at the
# database level.
empty_strings_allowed = True
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates, creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
default_validators = [] # Default set of validators
default_error_messages = {
'invalid_choice': _(u'Value %r is not a valid choice.'),
'null': _(u'This field cannot be null.'),
'blank': _(u'This field cannot be blank.'),
}
    # Generic field type description, usually overridden by subclasses
def _description(self):
return _(u'Field of type: %(field_type)s') % {
'field_type': self.__class__.__name__
}
description = property(_description)
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False,
db_index=False, rel=None, default=NOT_PROVIDED, editable=True,
serialize=True, unique_for_date=None, unique_for_month=None,
unique_for_year=None, choices=None, help_text='', db_column=None,
db_tablespace=None, auto_created=False, validators=[],
error_messages=None):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
self.max_length, self._unique = max_length, unique
self.blank, self.null = blank, null
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if self.empty_strings_allowed and connection.features.interprets_empty_strings_as_nulls:
self.null = True
self.rel = rel
self.default = default
self.editable = editable
self.serialize = serialize
self.unique_for_date, self.unique_for_month = unique_for_date, unique_for_month
self.unique_for_year = unique_for_year
self._choices = choices or []
self.help_text = help_text
self.db_column = db_column
self.db_tablespace = db_tablespace or settings.DEFAULT_INDEX_TABLESPACE
self.auto_created = auto_created
# Set db_index to True if the field has a relationship and doesn't explicitly set db_index.
self.db_index = db_index
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
self.validators = self.default_validators + validators
messages = {}
for c in reversed(self.__class__.__mro__):
messages.update(getattr(c, 'default_error_messages', {}))
messages.update(error_messages or {})
self.error_messages = messages
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
"""
return value
def run_validators(self, value):
if value in validators.EMPTY_VALUES:
return
errors = []
for v in self.validators:
try:
v(value)
except exceptions.ValidationError, e:
if hasattr(e, 'code') and e.code in self.error_messages:
message = self.error_messages[e.code]
if e.params:
message = message % e.params
errors.append(message)
else:
errors.extend(e.messages)
if errors:
raise exceptions.ValidationError(errors)
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
return
if self._choices and value:
for option_key, option_value in self.choices:
if isinstance(option_value, (list, tuple)):
# This is an optgroup, so look inside the group for options.
for optgroup_key, optgroup_value in option_value:
if value == optgroup_key:
return
elif value == option_key:
return
raise exceptions.ValidationError(self.error_messages['invalid_choice'] % value)
if value is None and not self.null:
raise exceptions.ValidationError(self.error_messages['null'])
if not self.blank and value in validators.EMPTY_VALUES:
raise exceptions.ValidationError(self.error_messages['blank'])
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors from to_python
and validate are propagated. The correct value is returned if no error is
raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
self.run_validators(value)
return value
def db_type(self, connection):
"""
Returns the database column data type for this field, for the provided
connection.
"""
# The default implementation of this method looks at the
# backend-specific DATA_TYPES dictionary, looking up the field by its
# "internal type".
#
# A Field class can implement the get_internal_type() method to specify
# which *preexisting* Django Field class it's most similar to -- i.e.,
# an XMLField is represented by a TEXT column type, which is the same
# as the TextField Django field type, which means XMLField's
# get_internal_type() returns 'TextField'.
#
# But the limitation of the get_internal_type() / data_types approach
# is that it cannot handle database column types that aren't already
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return connection.creation.data_types[self.get_internal_type()] % data
except KeyError:
return None
def unique(self):
return self._unique or self.primary_key
unique = property(unique)
def set_attributes_from_name(self, name):
self.name = name
self.attname, self.column = self.get_attname_column()
if self.verbose_name is None and name:
self.verbose_name = name.replace('_', ' ')
def contribute_to_class(self, cls, name):
self.set_attributes_from_name(name)
cls._meta.add_field(self)
if self.choices:
setattr(cls, 'get_%s_display' % self.name, curry(cls._get_FIELD_display, field=self))
def get_attname(self):
return self.name
def get_attname_column(self):
attname = self.get_attname()
column = self.db_column or attname
return attname, column
def get_cache_name(self):
return '_%s_cache' % self.name
def get_internal_type(self):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"Perform preliminary non-db specific value checks and conversions."
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
        Used by the default implementations of ``get_db_prep_save`` and
        ``get_db_prep_lookup``.
"""
if not prepared:
value = self.get_prep_value(value)
return value
def get_db_prep_save(self, value, connection):
"Returns field's value prepared for saving into a database."
return self.get_db_prep_value(value, connection=connection, prepared=False)
def get_prep_lookup(self, lookup_type, value):
"Perform preliminary non-db specific lookup checks and conversions"
if hasattr(value, 'prepare'):
return value.prepare()
if hasattr(value, '_prepare'):
return value._prepare()
if lookup_type in (
'regex', 'iregex', 'month', 'day', 'week_day', 'search',
'contains', 'icontains', 'iexact', 'startswith', 'istartswith',
'endswith', 'iendswith', 'isnull'
):
return value
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return self.get_prep_value(value)
elif lookup_type in ('range', 'in'):
return [self.get_prep_value(v) for v in value]
elif lookup_type == 'year':
try:
return int(value)
except ValueError:
raise ValueError("The __year lookup type requires an integer argument")
raise TypeError("Field has invalid lookup: %s" % lookup_type)
def get_db_prep_lookup(self, lookup_type, value, connection, prepared=False):
"Returns field's value prepared for database lookup."
if not prepared:
value = self.get_prep_lookup(lookup_type, value)
if hasattr(value, 'get_compiler'):
value = value.get_compiler(connection=connection)
if hasattr(value, 'as_sql') or hasattr(value, '_as_sql'):
# If the value has a relabel_aliases method, it will need to
# be invoked before the final SQL is evaluated
if hasattr(value, 'relabel_aliases'):
return value
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
else:
sql, params = value._as_sql(connection=connection)
return QueryWrapper(('(%s)' % sql), params)
if lookup_type in ('regex', 'iregex', 'month', 'day', 'week_day', 'search'):
return [value]
elif lookup_type in ('exact', 'gt', 'gte', 'lt', 'lte'):
return [self.get_db_prep_value(value, connection=connection, prepared=prepared)]
elif lookup_type in ('range', 'in'):
return [self.get_db_prep_value(v, connection=connection, prepared=prepared) for v in value]
elif lookup_type in ('contains', 'icontains'):
return ["%%%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'iexact':
return [connection.ops.prep_for_iexact_query(value)]
elif lookup_type in ('startswith', 'istartswith'):
return ["%s%%" % connection.ops.prep_for_like_query(value)]
elif lookup_type in ('endswith', 'iendswith'):
return ["%%%s" % connection.ops.prep_for_like_query(value)]
elif lookup_type == 'isnull':
return []
elif lookup_type == 'year':
if self.get_internal_type() == 'DateField':
return connection.ops.year_lookup_bounds_for_date_field(value)
else:
return connection.ops.year_lookup_bounds(value)
def has_default(self):
"Returns a boolean of whether this field has a default value."
return self.default is not NOT_PROVIDED
def get_default(self):
"Returns the default value for this field."
if self.has_default():
if callable(self.default):
return self.default()
return force_unicode(self.default, strings_only=True)
if not self.empty_strings_allowed or (self.null and not connection.features.interprets_empty_strings_as_nulls):
return None
return ""
def get_validator_unique_lookup_type(self):
return '%s__exact' % self.name
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
first_choice = include_blank and blank_choice or []
if self.choices:
return first_choice + list(self.choices)
rel_model = self.rel.to
if hasattr(self.rel, 'get_related_field'):
lst = [(getattr(x, self.rel.get_related_field().attname), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
else:
lst = [(x._get_pk_val(), smart_unicode(x)) for x in rel_model._default_manager.complex_filter(self.rel.limit_choices_to)]
return first_choice + lst
def get_choices_default(self):
return self.get_choices()
def get_flatchoices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"Returns flattened choices with a default blank choice included."
first_choice = include_blank and blank_choice or []
return first_choice + list(self.flatchoices)
def _get_val_from_obj(self, obj):
if obj is not None:
return getattr(obj, self.attname)
else:
return self.get_default()
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return smart_unicode(self._get_val_from_obj(obj))
def bind(self, fieldmapping, original, bound_field_class):
return bound_field_class(self, fieldmapping, original)
def _get_choices(self):
if hasattr(self._choices, 'next'):
choices, self._choices = tee(self._choices)
return choices
else:
return self._choices
choices = property(_get_choices)
def _get_flatchoices(self):
"""Flattened version of choices tuple."""
flat = []
for choice, value in self.choices:
if isinstance(value, (list, tuple)):
flat.extend(value)
else:
flat.append((choice,value))
return flat
flatchoices = property(_get_flatchoices)
def save_form_data(self, instance, data):
setattr(instance, self.name, data)
def formfield(self, form_class=forms.CharField, **kwargs):
"Returns a django.forms.Field instance for this database Field."
defaults = {'required': not self.blank, 'label': capfirst(self.verbose_name), 'help_text': self.help_text}
if self.has_default():
if callable(self.default):
defaults['initial'] = self.default
defaults['show_hidden_initial'] = True
else:
defaults['initial'] = self.get_default()
if self.choices:
# Fields with choices get special treatment.
include_blank = self.blank or not (self.has_default() or 'initial' in kwargs)
defaults['choices'] = self.get_choices(include_blank=include_blank)
defaults['coerce'] = self.to_python
if self.null:
defaults['empty_value'] = None
form_class = forms.TypedChoiceField
# Many of the subclass-specific formfield arguments (min_value,
# max_value) don't apply for choice fields, so be sure to only pass
# the values that TypedChoiceField will understand.
for k in kwargs.keys():
if k not in ('coerce', 'empty_value', 'choices', 'required',
'widget', 'label', 'initial', 'help_text',
'error_messages', 'show_hidden_initial'):
del kwargs[k]
defaults.update(kwargs)
return form_class(**defaults)
def value_from_object(self, obj):
"Returns the value of this field in the given model instance."
return getattr(obj, self.attname)
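# A minimal sketch of the get_internal_type() approach described in db_type()
# above: a custom field can reuse an existing backend column type by naming
# the built-in field it is most similar to, and only needs to override
# db_type() itself for column types Django does not already know about.
# ("HandField" and its max_length are made-up illustrative values.)
#
#   class HandField(Field):
#       description = "A hand of playing cards"
#
#       def __init__(self, *args, **kwargs):
#           kwargs['max_length'] = 104
#           super(HandField, self).__init__(*args, **kwargs)
#
#       def get_internal_type(self):
#           # stored with the same column type as a CharField
#           return 'CharField'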
class AutoField(Field):
description = _("Integer")
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u'This value must be an integer.'),
}
def __init__(self, *args, **kwargs):
assert kwargs.get('primary_key', False) is True, "%ss must have primary_key=True." % self.__class__.__name__
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(self.error_messages['invalid'])
def validate(self, value, model_instance):
pass
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def contribute_to_class(self, cls, name):
assert not cls._meta.has_auto_field, "A model can't have more than one AutoField."
super(AutoField, self).contribute_to_class(cls, name)
cls._meta.has_auto_field = True
cls._meta.auto_field = self
def formfield(self, **kwargs):
return None
class BooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u'This value must be either True or False.'),
}
description = _("Boolean (Either True or False)")
def __init__(self, *args, **kwargs):
kwargs['blank'] = True
if 'default' not in kwargs and not kwargs.get('null'):
kwargs['default'] = False
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "BooleanField"
def to_python(self, value):
if value in (True, False):
            # if value is 1 or 0 then it's equal to True or False, but we want
# to return a true bool for semantic reasons.
return bool(value)
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(self.error_messages['invalid'])
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(BooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
# Unlike most fields, BooleanField figures out include_blank from
# self.null instead of self.blank.
if self.choices:
include_blank = self.null or not (self.has_default() or 'initial' in kwargs)
defaults = {'choices': self.get_choices(include_blank=include_blank)}
else:
defaults = {'form_class': forms.BooleanField}
defaults.update(kwargs)
return super(BooleanField, self).formfield(**defaults)
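# A quick illustration of the coercion performed by BooleanField.to_python()
# above (example values; any other input raises ValidationError, and
# instantiating the field requires Django settings to be configured):
#
#   BooleanField().to_python('1')     -> True
#   BooleanField().to_python('f')     -> False
#   BooleanField().to_python(False)   -> False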
class CharField(Field):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
super(CharField, self).__init__(*args, **kwargs)
self.validators.append(validators.MaxLengthValidator(self.max_length))
def get_internal_type(self):
return "CharField"
def to_python(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
# Passing max_length to forms.CharField means that the value's length
# will be validated twice. This is considered acceptable since we want
# the value in the form field (to pass into widget for example).
defaults = {'max_length': self.max_length}
defaults.update(kwargs)
return super(CharField, self).formfield(**defaults)
# TODO: Maybe move this into contrib, because it's specialized.
class CommaSeparatedIntegerField(CharField):
default_validators = [validators.validate_comma_separated_integer_list]
description = _("Comma-separated integers")
def formfield(self, **kwargs):
defaults = {
'error_messages': {
'invalid': _(u'Enter only digits separated by commas.'),
}
}
defaults.update(kwargs)
return super(CommaSeparatedIntegerField, self).formfield(**defaults)
ansi_date_re = re.compile(r'^\d{4}-\d{1,2}-\d{1,2}$')
class DateField(Field):
description = _("Date (without time)")
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Enter a valid date in YYYY-MM-DD format.'),
'invalid_date': _('Invalid date: %s'),
}
def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
        # HACK: auto_now_add/auto_now should be done as a default or a pre_save.
if auto_now or auto_now_add:
kwargs['editable'] = False
kwargs['blank'] = True
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DateField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value.date()
if isinstance(value, datetime.date):
return value
if not ansi_date_re.search(value):
raise exceptions.ValidationError(self.error_messages['invalid'])
# Now that we have the date string in YYYY-MM-DD format, check to make
# sure it's a valid date.
# We could use time.strptime here and catch errors, but datetime.date
# produces much friendlier error messages.
year, month, day = map(int, value.split('-'))
try:
return datetime.date(year, month, day)
except ValueError, e:
msg = self.error_messages['invalid_date'] % _(str(e))
raise exceptions.ValidationError(msg)
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now()
setattr(model_instance, self.attname, value)
return value
else:
return super(DateField, self).pre_save(model_instance, add)
def contribute_to_class(self, cls, name):
super(DateField,self).contribute_to_class(cls, name)
if not self.null:
setattr(cls, 'get_next_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=True))
setattr(cls, 'get_previous_by_%s' % self.name,
curry(cls._get_next_or_previous_by_FIELD, field=self, is_next=False))
def get_prep_lookup(self, lookup_type, value):
# For "__month", "__day", and "__week_day" lookups, convert the value
# to an int so the database backend always sees a consistent type.
if lookup_type in ('month', 'day', 'week_day'):
return int(value)
return super(DateField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_date(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
data = datetime_safe.new_date(val).strftime("%Y-%m-%d")
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateField}
defaults.update(kwargs)
return super(DateField, self).formfield(**defaults)
class DateTimeField(DateField):
default_error_messages = {
'invalid': _(u'Enter a valid date/time in YYYY-MM-DD HH:MM[:ss[.uuuuuu]] format.'),
}
description = _("Date (with time)")
def get_internal_type(self):
return "DateTimeField"
def to_python(self, value):
if value is None:
return value
if isinstance(value, datetime.datetime):
return value
if isinstance(value, datetime.date):
return datetime.datetime(value.year, value.month, value.day)
# Attempt to parse a datetime:
value = smart_str(value)
# split usecs, because they are not recognized by strptime.
if '.' in value:
try:
value, usecs = value.split('.')
usecs = int(usecs)
except ValueError:
raise exceptions.ValidationError(self.error_messages['invalid'])
else:
usecs = 0
kwargs = {'microsecond': usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M:%S')[:6],
**kwargs)
except ValueError:
try: # Try without seconds.
return datetime.datetime(*time.strptime(value, '%Y-%m-%d %H:%M')[:5],
**kwargs)
except ValueError: # Try without hour/minutes/seconds.
try:
return datetime.datetime(*time.strptime(value, '%Y-%m-%d')[:3],
**kwargs)
except ValueError:
raise exceptions.ValidationError(self.error_messages['invalid'])
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts dates into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_datetime(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
d = datetime_safe.new_datetime(val)
data = d.strftime('%Y-%m-%d %H:%M:%S')
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.DateTimeField}
defaults.update(kwargs)
return super(DateTimeField, self).formfield(**defaults)
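# DateTimeField.to_python() above tries progressively shorter formats; a few
# illustrative conversions (example values, Django settings must be configured):
#
#   DateTimeField().to_python('2010-05-01 12:30:45')      -> datetime(2010, 5, 1, 12, 30, 45)
#   DateTimeField().to_python('2010-05-01 12:30')         -> datetime(2010, 5, 1, 12, 30)
#   DateTimeField().to_python('2010-05-01')               -> datetime(2010, 5, 1, 0, 0)
#   DateTimeField().to_python(datetime.date(2010, 5, 1))  -> datetime(2010, 5, 1, 0, 0)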
class DecimalField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _(u'This value must be a decimal number.'),
}
description = _("Decimal number")
def __init__(self, verbose_name=None, name=None, max_digits=None, decimal_places=None, **kwargs):
self.max_digits, self.decimal_places = max_digits, decimal_places
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "DecimalField"
def to_python(self, value):
if value is None:
return value
try:
return decimal.Decimal(value)
except decimal.InvalidOperation:
raise exceptions.ValidationError(self.error_messages['invalid'])
def _format(self, value):
if isinstance(value, basestring) or value is None:
return value
else:
return self.format_number(value)
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.util.
#
# It is preserved because it is used by the oracle backend
# (django.db.backends.oracle.query), and also for
# backwards-compatibility with any external code which may have used
# this method.
from django.db.backends import util
return util.format_number(value, self.max_digits, self.decimal_places)
def get_db_prep_save(self, value, connection):
return connection.ops.value_to_db_decimal(self.to_python(value),
self.max_digits, self.decimal_places)
def get_prep_value(self, value):
return self.to_python(value)
def formfield(self, **kwargs):
defaults = {
'max_digits': self.max_digits,
'decimal_places': self.decimal_places,
'form_class': forms.DecimalField,
}
defaults.update(kwargs)
return super(DecimalField, self).formfield(**defaults)
class EmailField(CharField):
default_validators = [validators.validate_email]
description = _("E-mail address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 75)
CharField.__init__(self, *args, **kwargs)
class FilePathField(Field):
description = _("File path")
def __init__(self, verbose_name=None, name=None, path='', match=None, recursive=False, **kwargs):
self.path, self.match, self.recursive = path, match, recursive
kwargs['max_length'] = kwargs.get('max_length', 100)
Field.__init__(self, verbose_name, name, **kwargs)
def formfield(self, **kwargs):
defaults = {
'path': self.path,
'match': self.match,
'recursive': self.recursive,
'form_class': forms.FilePathField,
}
defaults.update(kwargs)
return super(FilePathField, self).formfield(**defaults)
def get_internal_type(self):
return "FilePathField"
class FloatField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("This value must be a float."),
}
description = _("Floating point number")
def get_prep_value(self, value):
if value is None:
return None
return float(value)
def get_internal_type(self):
return "FloatField"
def to_python(self, value):
if value is None:
return value
try:
return float(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(self.error_messages['invalid'])
def formfield(self, **kwargs):
defaults = {'form_class': forms.FloatField}
defaults.update(kwargs)
return super(FloatField, self).formfield(**defaults)
class IntegerField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("This value must be an integer."),
}
description = _("Integer")
def get_prep_value(self, value):
if value is None:
return None
return int(value)
def get_prep_lookup(self, lookup_type, value):
if (lookup_type == 'gte' or lookup_type == 'lt') \
and isinstance(value, float):
value = math.ceil(value)
return super(IntegerField, self).get_prep_lookup(lookup_type, value)
def get_internal_type(self):
return "IntegerField"
def to_python(self, value):
if value is None:
return value
try:
return int(value)
except (TypeError, ValueError):
raise exceptions.ValidationError(self.error_messages['invalid'])
def formfield(self, **kwargs):
defaults = {'form_class': forms.IntegerField}
defaults.update(kwargs)
return super(IntegerField, self).formfield(**defaults)
class BigIntegerField(IntegerField):
empty_strings_allowed = False
description = _("Big (8 byte) integer")
MAX_BIGINT = 9223372036854775807
def get_internal_type(self):
return "BigIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': -BigIntegerField.MAX_BIGINT - 1,
'max_value': BigIntegerField.MAX_BIGINT}
defaults.update(kwargs)
return super(BigIntegerField, self).formfield(**defaults)
class IPAddressField(Field):
empty_strings_allowed = False
description = _("IP address")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = 15
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "IPAddressField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.IPAddressField}
defaults.update(kwargs)
return super(IPAddressField, self).formfield(**defaults)
class NullBooleanField(Field):
empty_strings_allowed = False
default_error_messages = {
'invalid': _("This value must be either None, True or False."),
}
description = _("Boolean (Either True, False or None)")
def __init__(self, *args, **kwargs):
kwargs['null'] = True
kwargs['blank'] = True
Field.__init__(self, *args, **kwargs)
def get_internal_type(self):
return "NullBooleanField"
def to_python(self, value):
if value is None:
return None
if value in (True, False):
return bool(value)
if value in ('None',):
return None
if value in ('t', 'True', '1'):
return True
if value in ('f', 'False', '0'):
return False
raise exceptions.ValidationError(self.error_messages['invalid'])
def get_prep_lookup(self, lookup_type, value):
# Special-case handling for filters coming from a web request (e.g. the
# admin interface). Only works for scalar values (not lists). If you're
# passing in a list, you might as well make things the right type when
# constructing the list.
if value in ('1', '0'):
value = bool(int(value))
return super(NullBooleanField, self).get_prep_lookup(lookup_type, value)
def get_prep_value(self, value):
if value is None:
return None
return bool(value)
def formfield(self, **kwargs):
defaults = {
'form_class': forms.NullBooleanField,
'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
defaults.update(kwargs)
return super(NullBooleanField, self).formfield(**defaults)
class PositiveIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "PositiveIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveIntegerField, self).formfield(**defaults)
class PositiveSmallIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "PositiveSmallIntegerField"
def formfield(self, **kwargs):
defaults = {'min_value': 0}
defaults.update(kwargs)
return super(PositiveSmallIntegerField, self).formfield(**defaults)
class SlugField(CharField):
description = _("String (up to %(max_length)s)")
def __init__(self, *args, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 50)
# Set db_index=True unless it's been set manually.
if 'db_index' not in kwargs:
kwargs['db_index'] = True
super(SlugField, self).__init__(*args, **kwargs)
def get_internal_type(self):
return "SlugField"
def formfield(self, **kwargs):
defaults = {'form_class': forms.SlugField}
defaults.update(kwargs)
return super(SlugField, self).formfield(**defaults)
class SmallIntegerField(IntegerField):
description = _("Integer")
def get_internal_type(self):
return "SmallIntegerField"
class TextField(Field):
description = _("Text")
def get_internal_type(self):
return "TextField"
def get_prep_value(self, value):
if isinstance(value, basestring) or value is None:
return value
return smart_unicode(value)
def formfield(self, **kwargs):
defaults = {'widget': forms.Textarea}
defaults.update(kwargs)
return super(TextField, self).formfield(**defaults)
class TimeField(Field):
description = _("Time")
empty_strings_allowed = False
default_error_messages = {
'invalid': _('Enter a valid time in HH:MM[:ss[.uuuuuu]] format.'),
}
def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
self.auto_now, self.auto_now_add = auto_now, auto_now_add
if auto_now or auto_now_add:
kwargs['editable'] = False
Field.__init__(self, verbose_name, name, **kwargs)
def get_internal_type(self):
return "TimeField"
def to_python(self, value):
if value is None:
return None
if isinstance(value, datetime.time):
return value
if isinstance(value, datetime.datetime):
# Not usually a good idea to pass in a datetime here (it loses
# information), but this can be a side-effect of interacting with a
# database backend (e.g. Oracle), so we'll be accommodating.
return value.time()
# Attempt to parse a datetime:
value = smart_str(value)
# split usecs, because they are not recognized by strptime.
if '.' in value:
try:
value, usecs = value.split('.')
usecs = int(usecs)
except ValueError:
raise exceptions.ValidationError(self.error_messages['invalid'])
else:
usecs = 0
kwargs = {'microsecond': usecs}
try: # Seconds are optional, so try converting seconds first.
return datetime.time(*time.strptime(value, '%H:%M:%S')[3:6],
**kwargs)
except ValueError:
try: # Try without seconds.
return datetime.time(*time.strptime(value, '%H:%M')[3:5],
**kwargs)
except ValueError:
raise exceptions.ValidationError(self.error_messages['invalid'])
def pre_save(self, model_instance, add):
if self.auto_now or (self.auto_now_add and add):
value = datetime.datetime.now().time()
setattr(model_instance, self.attname, value)
return value
else:
return super(TimeField, self).pre_save(model_instance, add)
def get_prep_value(self, value):
return self.to_python(value)
def get_db_prep_value(self, value, connection, prepared=False):
# Casts times into the format expected by the backend
if not prepared:
value = self.get_prep_value(value)
return connection.ops.value_to_db_time(value)
def value_to_string(self, obj):
val = self._get_val_from_obj(obj)
if val is None:
data = ''
else:
data = val.strftime("%H:%M:%S")
return data
def formfield(self, **kwargs):
defaults = {'form_class': forms.TimeField}
defaults.update(kwargs)
return super(TimeField, self).formfield(**defaults)
class URLField(CharField):
description = _("URL")
def __init__(self, verbose_name=None, name=None, verify_exists=True, **kwargs):
kwargs['max_length'] = kwargs.get('max_length', 200)
CharField.__init__(self, verbose_name, name, **kwargs)
self.validators.append(validators.URLValidator(verify_exists=verify_exists))
class XMLField(TextField):
description = _("XML text")
def __init__(self, verbose_name=None, name=None, schema_path=None, **kwargs):
self.schema_path = schema_path
Field.__init__(self, verbose_name, name, **kwargs)
| 38.083483 | 166 | 0.629063 |
4ad676c629fc36ee0274a2fa06940c591bb575a1
| 1,385 |
py
|
Python
|
chrome/browser/resources/ssl/tls_error_assistant/gen_tls_error_assistant_proto.py
|
google-ar/chromium
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 777 |
2017-08-29T15:15:32.000Z
|
2022-03-21T05:29:41.000Z
|
chrome/browser/resources/ssl/tls_error_assistant/gen_tls_error_assistant_proto.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 66 |
2017-08-30T18:31:18.000Z
|
2021-08-02T10:59:35.000Z
|
chrome/browser/resources/ssl/tls_error_assistant/gen_tls_error_assistant_proto.py
|
harrymarkovskiy/WebARonARCore
|
2441c86a5fd975f09a6c30cddb57dfb7fc239699
|
[
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 123 |
2017-08-30T01:19:34.000Z
|
2022-03-17T22:55:31.000Z
|
#!/usr/bin/python
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Convert the ASCII tls_error_assistant.asciipb proto into a binary resource.
"""
import os
import sys
# Import the binary proto generator. Walks up to the root of the source tree
# which is six directories above, and then finds the protobufs directory from
# there.
proto_generator_path = os.path.normpath(os.path.join(os.path.abspath(__file__),
*[os.path.pardir] * 6 + ['chrome/browser/resources/protobufs']))
sys.path.insert(0, proto_generator_path)
from binary_proto_generator import BinaryProtoGenerator
class TLSErrorAssistantProtoGenerator(BinaryProtoGenerator):
def ImportProtoModule(self):
import tls_error_assistant_pb2
globals()['tls_error_assistant_pb2'] = tls_error_assistant_pb2
def EmptyProtoInstance(self):
return tls_error_assistant_pb2.TLSErrorAssistantConfig()
def ValidatePb(self, opts, pb):
assert pb.version_id > 0
assert len(pb.captive_portal_cert) > 0
def ProcessPb(self, opts, pb):
binary_pb_str = pb.SerializeToString()
outfile = os.path.join(opts.outdir, opts.outbasename)
open(outfile, 'wb').write(binary_pb_str)
def main():
return TLSErrorAssistantProtoGenerator().Run()
if __name__ == '__main__':
sys.exit(main())
| 30.777778 | 79 | 0.762455 |
f9bac9fca57b57f57ff8768b78286267e011e3eb
| 3,612 |
py
|
Python
|
legistar/old/people.py
|
datamade/python-legistar-scraper
|
ac379c6bd0437db85cecf469f34b02baedb1ae7e
|
[
"BSD-3-Clause"
] | 1 |
2021-12-27T12:07:20.000Z
|
2021-12-27T12:07:20.000Z
|
legistar/old/people.py
|
datamade/python-legistar-scraper
|
ac379c6bd0437db85cecf469f34b02baedb1ae7e
|
[
"BSD-3-Clause"
] | null | null | null |
legistar/old/people.py
|
datamade/python-legistar-scraper
|
ac379c6bd0437db85cecf469f34b02baedb1ae7e
|
[
"BSD-3-Clause"
] | 2 |
2019-05-01T20:09:29.000Z
|
2019-05-16T19:35:14.000Z
|
class MembershipAdapter(Adapter):
'''Convert a legistar scraper's membership into a pupa-compliant membership. '''
pupa_model = pupa.scrape.Membership
extras_keys = ['appointed_by']
def stringify_date(self, dt):
        '''Given a datetime, stringify it to a date, assuming there is
        no meaningful time portion associated with it. Skip the item
        if the value is empty.
'''
if not dt:
raise self.SkipItem()
else:
return dt.strftime('%Y-%m-%d')
#make_item('start_date')
def get_start_date(self):
return self.stringify_date(self.data.get('start_date'))
#make_item('end_date')
def get_end_date(self):
return self.stringify_date(self.data.get('end_date'))
#make_item('organization_id')
def get_org_id(self):
return self.data['organization_id']
#make_item('role')
    def get_role(self):
'''Role defaults to empty string.
'''
return self.data['role'] or ''
def get_instance(self, **extra_instance_data):
# Get instance data.
instance_data = self.get_instance_data()
instance_data.update(extra_instance_data)
extras = instance_data.pop('extras')
# Create the instance.
instance = self.pupa_model(**instance_data)
instance.extras.update(extras)
return instance
class MembershipConverter(Converter):
adapter = MembershipAdapter
def __iter__(self):
yield from self.create_memberships()
def get_legislature(self):
        '''Gets the previously scraped legislature org.
'''
return self.config.org_cache[self.cfg.TOPLEVEL_ORG_MEMBERSHIP_NAME]
def get_org(self, org_name):
        '''Gets or creates the org with the given name and caches
        the result.
'''
created = False
orgs = self.config.org_cache
# Get the org.
org = orgs.get(org_name)
if org is not None:
# Cache hit.
return created, org
# Create the org.
classification = self.cfg.get_org_classification(org_name)
org = pupa.scrape.Organization(
name=org_name, classification=classification)
for source in self.person.sources:
org.add_source(**source)
created = True
# Cache it.
orgs[org_name] = org
if org is not None:
# Cache hit.
return created, org
# Add a source to the org.
for source in self.person.sources:
if 'detail' in source['note']:
org.add_source(**source)
return created, org
def create_membership(self, data):
'''Retrieves the matching committee and adds this person
as a member of the committee.
'''
if 'person_id' not in data:
data['person_id'] = self.person._id
# Also drop memberships in dropped orgs.
if hasattr(self.cfg, 'should_drop_organization'):
if 'org' in data:
if self.cfg.should_drop_organization(dict(name=data['org'])):
return
# Get the committee.
if 'organization_id' not in data:
org_name = data.pop('org')
created, org = self.get_org(org_name)
if created:
yield org
# Add the person and org ids.
data['organization_id'] = org._id
# Convert the membership to pupa object.
adapter = self.make_child(self.adapter, data)
membership = adapter.get_instance()
yield membership
| 29.606557 | 85 | 0.59856 |
deb864a0273f8422e76021dd44d19bc628b6b567
| 2,208 |
py
|
Python
|
networkx/algorithms/bipartite/covering.py
|
KyleBenson/networkx
|
26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/bipartite/covering.py
|
KyleBenson/networkx
|
26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a
|
[
"BSD-3-Clause"
] | null | null | null |
networkx/algorithms/bipartite/covering.py
|
KyleBenson/networkx
|
26ccb4a380ba0e5304d7bbff53eb9859c6e4c93a
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016 NetworkX developers.
# Copyright (C) 2016 by
# Nishant Nikhil <[email protected]>
# All rights reserved.
# BSD license.
""" Functions related to graph covers."""
import networkx as nx
from networkx.utils import not_implemented_for, arbitrary_element
from networkx.algorithms.bipartite.matching import hopcroft_karp_matching
from networkx.algorithms.covering import min_edge_cover as _min_edge_cover
__all__ = ['min_edge_cover']
@not_implemented_for('directed')
@not_implemented_for('multigraph')
def min_edge_cover(G, matching_algorithm=None):
"""Returns a set of edges which constitutes
the minimum edge cover of the graph.
The smallest edge cover can be found in polynomial time by finding
a maximum matching and extending it greedily so that all nodes
are covered.
Parameters
----------
G : NetworkX graph
An undirected bipartite graph.
matching_algorithm : function
A function that returns a maximum cardinality matching in a
given bipartite graph. The function must take one input, the
graph ``G``, and return a dictionary mapping each node to its
mate. If not specified,
:func:`~networkx.algorithms.bipartite.matching.hopcroft_karp_matching`
will be used. Other possibilities include
        :func:`~networkx.algorithms.bipartite.matching.eppstein_matching`.
Returns
-------
set
A set of the edges in a minimum edge cover of the graph, given as
pairs of nodes. It contains both the edges `(u, v)` and `(v, u)`
for given nodes `u` and `v` among the edges of minimum edge cover.
Notes
-----
An edge cover of a graph is a set of edges such that every node of
the graph is incident to at least one edge of the set.
A minimum edge cover is an edge covering of smallest cardinality.
Due to its implementation, the worst-case running time of this algorithm
is bounded by the worst-case running time of the function
``matching_algorithm``.
"""
if matching_algorithm is None:
matching_algorithm = hopcroft_karp_matching
return _min_edge_cover(G, matching_algorithm=matching_algorithm)
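# A minimal usage sketch: for the complete bipartite graph K_{2,3} a maximum
# matching has two edges, so the greedy extension yields a cover of three
# edges, each stored in both directions.
if __name__ == "__main__":
    G = nx.complete_bipartite_graph(2, 3)
    cover = min_edge_cover(G)
    assert all(node in {u for u, _ in cover} for node in G)  # every node covered
    assert len(cover) == 6  # three undirected edges, each kept in both orientations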
| 36.196721 | 78 | 0.717391 |
5277d3e155d6597e1092c5598a21434ea5af901c
| 17,029 |
py
|
Python
|
diffkemp/llvm_ir/kernel_source.py
|
nikopatrik/diffkemp
|
460257b2c09b84ab492019f2ae1592b7b0a0b4c0
|
[
"Apache-2.0"
] | null | null | null |
diffkemp/llvm_ir/kernel_source.py
|
nikopatrik/diffkemp
|
460257b2c09b84ab492019f2ae1592b7b0a0b4c0
|
[
"Apache-2.0"
] | null | null | null |
diffkemp/llvm_ir/kernel_source.py
|
nikopatrik/diffkemp
|
460257b2c09b84ab492019f2ae1592b7b0a0b4c0
|
[
"Apache-2.0"
] | null | null | null |
"""
Browsing kernel sources.
Functions for searching function definitions, kernel modules, etc.
"""
from diffkemp.llvm_ir.build_llvm import LlvmKernelBuilder, BuildException
from diffkemp.llvm_ir.kernel_module import LlvmKernelModule
from diffkemp.llvm_ir.llvm_sysctl_module import LlvmSysctlModule
import errno
import os
import shutil
from subprocess import CalledProcessError, check_call, check_output
class SourceNotFoundException(Exception):
def __init__(self, fun):
self.fun = fun
def __str__(self):
return "Source for {} not found".format(self.fun)
class KernelSource:
"""
Source code of a single kernel.
Provides functions to search source files for function definitions, kernel
modules, and others.
"""
def __init__(self, kernel_dir, with_builder=False):
self.kernel_dir = os.path.abspath(kernel_dir)
self.builder = LlvmKernelBuilder(kernel_dir) if with_builder else None
self.modules = dict()
def initialize(self):
"""
Prepare the kernel builder.
        This is done automatically in the LlvmKernelBuilder constructor, but it
may be useful to re-initialize the builder after finalize was called.
"""
if self.builder:
self.builder.initialize()
def finalize(self):
"""Restore the kernel builder state."""
if self.builder:
self.builder.finalize()
def get_sources_with_params(self, directory):
"""
Get list of .c files in the given directory and all its subdirectories
that contain definitions of module parameters (contain call to
module_param macro).
"""
path = os.path.join(self.kernel_dir, directory)
result = list()
for f in os.listdir(path):
file = os.path.join(path, f)
if os.path.isfile(file) and file.endswith(".c"):
for line in open(file, "r"):
if "module_param" in line:
result.append(file)
break
elif os.path.isdir(file):
dir_files = self.get_sources_with_params(file)
result.extend(dir_files)
return result
def build_cscope_database(self):
"""
Build a database for the cscope tool. It will be later used to find
source files with symbol definitions.
"""
# If the database exists, do not rebuild it
if "cscope.files" in os.listdir(self.kernel_dir):
return
# Write all files that need to be scanned into cscope.files
with open(os.path.join(self.kernel_dir, "cscope.files"), "w") \
as cscope_file:
for root, dirs, files in os.walk(self.kernel_dir):
if ("/Documentation/" in root or
"/scripts/" in root or
"/tmp" in root):
continue
for f in files:
if os.path.islink(os.path.join(root, f)):
continue
if f.endswith((".c", ".h", ".x", ".s", ".S")):
path = os.path.relpath(os.path.join(root, f),
self.kernel_dir)
cscope_file.write("{}\n".format(path))
# Build cscope database
cwd = os.getcwd()
os.chdir(self.kernel_dir)
check_call(["cscope", "-b", "-q", "-k"])
os.chdir(cwd)
def _cscope_run(self, symbol, definition):
"""
Run cscope search for a symbol.
:param symbol: Symbol to search for
:param definition: If true, search definitions, otherwise search all
usage.
:return: List of found cscope entries.
"""
self.build_cscope_database()
try:
command = ["cscope", "-d", "-L"]
if definition:
command.append("-1")
else:
command.append("-0")
command.append(symbol)
with open(os.devnull, "w") as devnull:
cscope_output = check_output(command, stderr=devnull).decode(
'utf-8')
return [l for l in cscope_output.splitlines() if
l.split()[0].endswith("c")]
except CalledProcessError:
return []
def _find_tracepoint_macro_use(self, symbol):
"""
Find usages of tracepoint macro creating a tracepoint symbol.
:param symbol: Symbol generated using the macro.
:return: List of found cscope entries.
"""
macro_argument = symbol[len("__tracepoint_"):]
candidates = self._cscope_run("EXPORT_TRACEPOINT_SYMBOL", False)
return list(filter(lambda c: c.endswith("(" + macro_argument + ");"),
candidates))
def find_srcs_with_symbol_def(self, symbol):
"""
Use cscope to find a definition of the given symbol.
:param symbol: Symbol to find.
:return List of source files potentially containing the definition.
"""
cwd = os.getcwd()
os.chdir(self.kernel_dir)
try:
cscope_defs = self._cscope_run(symbol, True)
# It may not be enough to get the definitions from the cscope.
# There are multiple possible reasons:
# - the symbol is only defined in headers
# - there is a bug in cscope - it cannot find definitions
# containing function pointers as parameters
cscope_uses = self._cscope_run(symbol, False)
# Look whether this is one of the special cases when cscope does
# not find a correct source because of the exact symbol being
# created by the preprocessor
if any([symbol.startswith(s) for s in
["param_get_", "param_set_", "param_ops_"]]):
                # Symbols param_* are created in kernel/params.c using a macro
cscope_defs = ["kernel/params.c"] + cscope_defs
elif symbol.startswith("__tracepoint_"):
# Functions starting with __tracepoint_ are created by a macro
# in include/kernel/tracepoint.h; the corresponding usage of
# the macro has to be found to get the source file
cscope_defs = \
self._find_tracepoint_macro_use(symbol) + cscope_defs
elif symbol == "rcu_barrier":
cscope_defs = ["kernel/rcutree.c"] + cscope_defs
if len(cscope_defs) == 0 and len(cscope_uses) == 0:
raise SourceNotFoundException(symbol)
except SourceNotFoundException:
if symbol == "vfree":
cscope_uses = []
cscope_defs = ["mm/vmalloc.c"]
else:
raise
finally:
os.chdir(cwd)
        # We now create a list of files potentially containing the symbol
# definition. The list is sorted by priority:
# 1. Files marked by cscope as containing the symbol definition.
# 2. Files marked by cscope as using the symbol in <global> scope.
# 3. Files marked by cscope as using the symbol in other scope.
# Each group is also partially sorted - sources from the drivers/ and
# the arch/ directories occur later than the others (using prio_key).
# Moreover, each file occurs in the list just once (in place of its
# highest priority).
seen = set()
def prio_key(item):
if item.startswith("drivers/"):
return "}" + item
if item.startswith("arch/x86"):
# x86 has priority over other architectures
return "}}" + item
if item.startswith("arch/"):
return "}}}" + item
else:
return item
files = sorted(
[f for f in [line.split()[0] for line in cscope_defs]
if not (f in seen or seen.add(f))],
key=prio_key)
files.extend(sorted(
[f for (f, scope) in [(line.split()[0], line.split()[1])
for line in cscope_uses]
if (scope == "<global>" and not (f in seen or seen.add(f)))],
key=prio_key))
files.extend(sorted(
[f for (f, scope) in [(line.split()[0], line.split()[1])
for line in cscope_uses]
if (scope != "<global>" and not (f in seen or seen.add(f)))],
key=prio_key))
return files
def find_srcs_using_symbol(self, symbol):
"""
Use cscope to find sources using a symbol.
:param symbol: Symbol to find.
:return List of source files containing functions that use the symbol.
"""
cwd = os.getcwd()
os.chdir(self.kernel_dir)
try:
cscope_out = self._cscope_run(symbol, False)
if len(cscope_out) == 0:
raise SourceNotFoundException
files = set()
for line in cscope_out:
if line.split()[0].endswith(".h"):
continue
if line.split()[1] == "<global>":
continue
files.add(os.path.relpath(line.split()[0], self.kernel_dir))
return files
except SourceNotFoundException:
raise
finally:
os.chdir(cwd)
def get_module_from_source(self, source_path, created_before=None):
"""
Create an LLVM module from a source file.
Builds the source into LLVM IR if needed. No module is returned if the
module is already present but its LLVM IR was generated or its source
file modified after the given time constraint.
:param source_path: Relative path to the file
:param created_before: File creation time constraint.
:returns Instance of LlvmKernelModule
"""
name = source_path[:-2] if source_path.endswith(".c") else source_path
llvm_file = os.path.join(self.kernel_dir, "{}.ll".format(name))
source_file = os.path.join(self.kernel_dir, source_path)
        # If the LLVM IR file exists but was modified after the given timestamp,
# do not return the module.
if created_before:
try:
if (os.path.getmtime(source_file) > created_before or
os.path.getmtime(llvm_file) > created_before):
return None
except OSError:
pass
# If the module has already been created, return it
if name in self.modules:
return self.modules[name]
if self.builder:
try:
self.builder.build_source_to_llvm(source_file, llvm_file)
except BuildException:
pass
if not os.path.isfile(llvm_file):
return None
mod = LlvmKernelModule(llvm_file, source_file)
self.modules[name] = mod
return mod
def get_module_for_symbol(self, symbol, created_before=None):
"""
        Looks up files containing the definition of a symbol using cscope,
        then transforms them into LLVM modules and checks whether the symbol
        is actually defined in the created module.
In case there are multiple files containing the definition, the first
module containing the function definition is returned.
:param symbol: Name of the function to look up.
:param created_before: LLVM module creation time constraint.
:returns LLVM module containing the specified function.
"""
mod = None
srcs = self.find_srcs_with_symbol_def(symbol)
for src in srcs:
mod = self.get_module_from_source(src, created_before)
if mod:
if not (mod.has_function(symbol) or mod.has_global(symbol)):
mod = None
else:
break
if not mod:
raise SourceNotFoundException(symbol)
return mod
def get_sysctl_module(self, sysctl):
"""
Get the LLVM module containing the definition of a sysctl option.
:param sysctl: sysctl option to search for
:return: Instance of LlvmSysctlModule.
"""
# The sysctl is composed of entries separated by dots. Entries form
# a hierarchy - each entry is a child of its predecessor (i.e. all
# entries except the last one point to sysctl tables). We follow
# the hierarchy and build the source containing the parent table of
# the last entry.
entries = sysctl.split(".")
if entries[0] in ["kernel", "vm", "fs", "debug", "dev"]:
src = "kernel/sysctl.c"
table = "sysctl_base_table"
elif entries[0] == "net":
if entries[1] == "ipv4":
if entries[2] == "conf":
src = "net/ipv4/devinet.c"
table = "devinet_sysctl.1"
entries = entries[4:]
else:
src = "net/ipv4/sysctl_net_ipv4.c"
table = "ipv4_table"
entries = entries[2:]
elif entries[1] == "core":
src = "net/core/sysctl_net_core.c"
table = "net_core_table"
entries = entries[2:]
else:
raise SourceNotFoundException(sysctl)
else:
raise SourceNotFoundException(sysctl)
for (i, entry) in enumerate(entries):
# Build the file normally and then create a corresponding
# LlvmSysctlModule with the obtained sysctl table.
kernel_mod = self.get_module_from_source(src)
sysctl_mod = LlvmSysctlModule(kernel_mod, table)
if i == len(entries) - 1:
return sysctl_mod
table = sysctl_mod.get_child(entry).name
src = self.find_srcs_with_symbol_def(table)[0]
raise SourceNotFoundException(sysctl)
def get_module_for_kernel_mod(self, mod_dir, mod_name):
"""
Get LLVM module for a kernel module.
:param mod_dir: Kernel module directory.
:param mod_name: Kernel module name.
:return: LlvmKernelModule containing the built LLVM file.
"""
llvm_file = self.builder.build_kernel_mod_to_llvm(mod_dir, mod_name)
return LlvmKernelModule(os.path.join(self.kernel_dir, llvm_file))
@staticmethod
def create_dir_with_parents(directory):
"""
Create a directory with all parent directories.
Implements bash `mkdir -p`.
:param directory: Path to the directory to create.
"""
if not os.path.isdir(directory):
try:
os.makedirs(directory)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(directory):
pass
else:
raise
def copy_source_files(self, modules, target_dir):
"""
Copy C and LLVM source files of given modules from this kernel into
a different directory.
Preserves the directory structure.
Also copies all headers included by the modules.
:param modules: List of modules to copy.
:param target_dir: Destination directory (subfolders will be created
corresponding to the sources structure).
"""
for mod in modules:
src_dir = os.path.dirname(
os.path.relpath(mod.llvm, self.kernel_dir))
target_src_dir = os.path.join(target_dir, src_dir)
self.create_dir_with_parents(target_src_dir)
# Copy linked sources and headers.
for source in mod.get_included_sources():
src_source = source
dest_source = os.path.join(
target_dir, os.path.relpath(source, self.kernel_dir))
if not os.path.isfile(dest_source):
self.create_dir_with_parents(os.path.dirname(dest_source))
shutil.copyfile(src_source, dest_source)
mod.move_to_other_root_dir(self.kernel_dir, target_dir)
def copy_cscope_files(self, target_dir):
"""
Copy CScope database into a different directory. Since CScope files
contain paths relative to the kernel root, it can be used in the
target directory in case it contains the same directory structure
as this kernel does.
:param target_dir: Target directory.
"""
shutil.copy(os.path.join(self.kernel_dir, "cscope.files"), target_dir)
shutil.copy(os.path.join(self.kernel_dir, "cscope.in.out"), target_dir)
shutil.copy(os.path.join(self.kernel_dir, "cscope.out"), target_dir)
shutil.copy(os.path.join(self.kernel_dir, "cscope.po.out"), target_dir)
| 40.353081 | 79 | 0.578542 |
f19701610bea794151a15cf055abfdf35c39f262
| 4,473 |
py
|
Python
|
packages/sdk/odahuflow/sdk/models/inference_job_status.py
|
odahu/odahuflow
|
58c3220a266a61bb893cf79c4b994569e3445097
|
[
"ECL-2.0",
"Apache-2.0"
] | 12 |
2020-10-13T15:39:52.000Z
|
2021-10-11T17:13:42.000Z
|
packages/sdk/odahuflow/sdk/models/inference_job_status.py
|
odahu/odahuflow
|
58c3220a266a61bb893cf79c4b994569e3445097
|
[
"ECL-2.0",
"Apache-2.0"
] | 475 |
2019-11-18T12:40:47.000Z
|
2022-03-29T21:17:38.000Z
|
packages/sdk/odahuflow/sdk/models/inference_job_status.py
|
odahu/odahuflow
|
58c3220a266a61bb893cf79c4b994569e3445097
|
[
"ECL-2.0",
"Apache-2.0"
] | 4 |
2020-02-25T11:26:10.000Z
|
2021-03-10T12:01:00.000Z
|
# coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from odahuflow.sdk.models.base_model_ import Model
from odahuflow.sdk.models import util
class InferenceJobStatus(Model):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, message: str=None, pod_name: str=None, reason: str=None, state: str=None): # noqa: E501
"""InferenceJobStatus - a model defined in Swagger
:param message: The message of this InferenceJobStatus. # noqa: E501
:type message: str
:param pod_name: The pod_name of this InferenceJobStatus. # noqa: E501
:type pod_name: str
:param reason: The reason of this InferenceJobStatus. # noqa: E501
:type reason: str
:param state: The state of this InferenceJobStatus. # noqa: E501
:type state: str
"""
self.swagger_types = {
'message': str,
'pod_name': str,
'reason': str,
'state': str
}
self.attribute_map = {
'message': 'message',
'pod_name': 'podName',
'reason': 'reason',
'state': 'state'
}
self._message = message
self._pod_name = pod_name
self._reason = reason
self._state = state
@classmethod
def from_dict(cls, dikt) -> 'InferenceJobStatus':
"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The InferenceJobStatus of this InferenceJobStatus. # noqa: E501
:rtype: InferenceJobStatus
"""
return util.deserialize_model(dikt, cls)
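    # Illustrative usage sketch (not part of the generated file): deserializing
    # a status payload as returned by the API. The dict below is a made-up
    # example; the JSON field names follow attribute_map above.
    #
    #   status = InferenceJobStatus.from_dict({
    #       "state": "succeeded",
    #       "podName": "job-pod-0",
    #       "reason": "",
    #       "message": "",
    #   })
    #   assert status.pod_name == "job-pod-0"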
@property
def message(self) -> str:
"""Gets the message of this InferenceJobStatus.
Message is any message from runtime service about status of InferenceJob # noqa: E501
:return: The message of this InferenceJobStatus.
:rtype: str
"""
return self._message
@message.setter
def message(self, message: str):
"""Sets the message of this InferenceJobStatus.
Message is any message from runtime service about status of InferenceJob # noqa: E501
:param message: The message of this InferenceJobStatus.
:type message: str
"""
self._message = message
@property
def pod_name(self) -> str:
"""Gets the pod_name of this InferenceJobStatus.
PodName is a name of Pod in Kubernetes that is running under the hood of InferenceJob # noqa: E501
:return: The pod_name of this InferenceJobStatus.
:rtype: str
"""
return self._pod_name
@pod_name.setter
def pod_name(self, pod_name: str):
"""Sets the pod_name of this InferenceJobStatus.
PodName is a name of Pod in Kubernetes that is running under the hood of InferenceJob # noqa: E501
:param pod_name: The pod_name of this InferenceJobStatus.
:type pod_name: str
"""
self._pod_name = pod_name
@property
def reason(self) -> str:
"""Gets the reason of this InferenceJobStatus.
Reason is a reason of some InferenceJob state that was retrieved from runtime service. for example reason of failure # noqa: E501
:return: The reason of this InferenceJobStatus.
:rtype: str
"""
return self._reason
@reason.setter
def reason(self, reason: str):
"""Sets the reason of this InferenceJobStatus.
Reason is a reason of some InferenceJob state that was retrieved from runtime service. for example reason of failure # noqa: E501
:param reason: The reason of this InferenceJobStatus.
:type reason: str
"""
self._reason = reason
@property
def state(self) -> str:
"""Gets the state of this InferenceJobStatus.
State describes current state of InferenceJob # noqa: E501
:return: The state of this InferenceJobStatus.
:rtype: str
"""
return self._state
@state.setter
def state(self, state: str):
"""Sets the state of this InferenceJobStatus.
State describes current state of InferenceJob # noqa: E501
:param state: The state of this InferenceJobStatus.
:type state: str
"""
self._state = state
| 29.622517 | 138 | 0.625531 |
7caa12f6a9c569942db12519b581ac444f5621d0
| 21,471 |
py
|
Python
|
angrmanagement/ui/views/disassembly_view.py
|
novafacing/angr-management
|
e7c94376736836094e247ca0dec73cede726408b
|
[
"BSD-2-Clause"
] | null | null | null |
angrmanagement/ui/views/disassembly_view.py
|
novafacing/angr-management
|
e7c94376736836094e247ca0dec73cede726408b
|
[
"BSD-2-Clause"
] | null | null | null |
angrmanagement/ui/views/disassembly_view.py
|
novafacing/angr-management
|
e7c94376736836094e247ca0dec73cede726408b
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
from typing import Union, Optional, TYPE_CHECKING
from PySide2.QtWidgets import QHBoxLayout, QVBoxLayout, QMenu, QApplication, QMessageBox
from PySide2.QtCore import Qt, QSize
from ...data.instance import ObjectContainer
from ...utils import locate_function
from ...data.function_graph import FunctionGraph
from ...logic.disassembly import JumpHistory, InfoDock
from ..widgets import QDisassemblyGraph, QDisasmStatusBar, QLinearDisassembly, QFeatureMap, QLinearDisassemblyView
from ..dialogs.dependson import DependsOn
from ..dialogs.jumpto import JumpTo
from ..dialogs.rename_label import RenameLabel
from ..dialogs.set_comment import SetComment
from ..dialogs.new_state import NewState
from ..dialogs.xref import XRef
from ..menus.disasm_insn_context_menu import DisasmInsnContextMenu
from ..menus.disasm_label_context_menu import DisasmLabelContextMenu
from .view import BaseView
if TYPE_CHECKING:
from angr.knowledge_plugins import Function
_l = logging.getLogger(__name__)
class DisassemblyView(BaseView):
def __init__(self, workspace, *args, **kwargs):
super(DisassemblyView, self).__init__('disassembly', workspace, *args, **kwargs)
self.caption = 'Disassembly'
self._show_address = True
self._show_variable = True
# whether we want to show identifier or not
self._show_variable_ident = False
# whether we want to show exception edges and all nodes that are only reachable through exception edges
self._show_exception_edges = True
self._linear_viewer = None # type: Optional[QLinearDisassembly]
self._flow_graph = None # type: Optional[QDisassemblyGraph]
self._statusbar = None
self._jump_history = JumpHistory()
self.infodock = InfoDock(self)
self._variable_recovery_flavor = 'fast'
self.variable_manager = None # type: Optional[VariableManager]
self._current_function = ObjectContainer(None, 'The currently selected function')
self._insn_menu = None # type: Optional[DisasmInsnContextMenu]
self._label_menu = None # type: Optional[DisasmLabelContextMenu]
self._insn_addr_on_context_menu = None
self._init_widgets()
self._init_menus()
self._register_events()
def reload(self):
old_infodock = self.infodock.copy()
self.infodock.initialize()
self._feature_map.refresh()
# Reload the current graph to make sure it gets the latest information, such as variables.
self.current_graph.reload(old_infodock=old_infodock)
def refresh(self):
self.current_graph.refresh()
self._feature_map.refresh()
def save_image_to(self, path):
if self._flow_graph is not None:
self._flow_graph.save_image_to(path)
def setFocus(self):
self._flow_graph.setFocus()
#
# Properties
#
@property
def disasm(self):
return self._flow_graph.disasm
@property
def smart_highlighting(self):
if self._flow_graph is None:
return False
if self.infodock is None:
return False
return self.infodock.smart_highlighting
@property
def show_address(self):
return self._show_address
@property
def show_variable(self):
return self._show_variable
@property
def show_variable_identifier(self):
return self._show_variable_ident
@property
def show_exception_edges(self):
return self._show_exception_edges
@property
def variable_recovery_flavor(self):
return self._variable_recovery_flavor
@variable_recovery_flavor.setter
def variable_recovery_flavor(self, v):
if v in ('fast', 'accurate'):
if v != self._variable_recovery_flavor:
self._variable_recovery_flavor = v
# TODO: Rerun the variable recovery analysis and update the current view
@property
def current_graph(self) -> Union[QLinearDisassemblyView,QDisassemblyGraph]:
"""
Return the current disassembly control, either linear viewer or flow graph.
:return: Linear viewer or flow graph.
"""
if self._linear_viewer.isVisible():
return self._linear_viewer
else:
return self._flow_graph
@property
def current_function(self) -> ObjectContainer:
return self._current_function
#
# Callbacks
#
# All callbacks are proxies to self.workspace.instance. These properties *in this class* may be removed in the near
# future.
@property
def insn_backcolor_callback(self):
return self.workspace.instance.insn_backcolor_callback
@insn_backcolor_callback.setter
def insn_backcolor_callback(self, v):
self.workspace.instance.insn_backcolor_callback = v
@property
def label_rename_callback(self):
return self.workspace.instance.label_rename_callback
@label_rename_callback.setter
def label_rename_callback(self, v):
self.workspace.instance.label_rename_callback = v
@property
def set_comment_callback(self):
return self.workspace.instance.set_comment_callback
@set_comment_callback.setter
def set_comment_callback(self, v):
self.workspace.instance.set_comment_callback = v
#
# Events
#
def keyPressEvent(self, event):
key = event.key()
if key == Qt.Key_Escape:
# jump back
# we put it here because the escape key is used to close other dialogs, and we do not want to catch the
# key-release event of the escape key in such cases.
self.jump_back()
return
super().keyPressEvent(event)
def keyReleaseEvent(self, event):
key = event.key()
if key == Qt.Key_G:
# jump to window
self.popup_jumpto_dialog()
return
elif key == Qt.Key_Left and QApplication.keyboardModifiers() & Qt.ALT != 0:
# jump back
self.jump_back()
return
elif key == Qt.Key_Right and QApplication.keyboardModifiers() & Qt.ALT != 0:
# jump forward
self.jump_forward()
return
elif key == Qt.Key_A:
# switch between highlight mode
self.toggle_smart_highlighting(not self.infodock.smart_highlighting)
return
elif key == Qt.Key_Tab:
# decompile
self.decompile_current_function()
return
elif key == Qt.Key_Semicolon:
# add comment
self.popup_comment_dialog()
return
elif key == Qt.Key_Space:
# switch to linear view
self.toggle_disasm_view()
event.accept()
return
super().keyReleaseEvent(event)
def redraw_current_graph(self, **kwargs):
"""
Redraw the graph currently in display.
:return: None
"""
self.current_graph.redraw()
def on_screen_changed(self):
self.current_graph.refresh()
#
# UI
#
def instruction_context_menu(self, insn, pos):
self._insn_addr_on_context_menu = insn.addr
# pass in the instruction address
self._insn_menu.insn_addr = insn.addr
# pop up the menu
self._insn_menu.qmenu(extra_entries=list(self.workspace.plugins.build_context_menu_insn(insn))).exec_(pos)
self._insn_addr_on_context_menu = None
def label_context_menu(self, addr: int, pos):
self._label_addr_on_context_menu = addr
self._label_menu.addr = addr
self._label_menu.qmenu().exec_(pos)
self._label_addr_on_context_menu = None
def popup_jumpto_dialog(self):
JumpTo(self, parent=self).exec_()
def popup_rename_label_dialog(self):
label_addr = self._address_in_selection()
if label_addr is None:
return
dialog = RenameLabel(self, label_addr, parent=self)
dialog.exec_()
def popup_comment_dialog(self):
comment_addr = self._address_in_selection()
if comment_addr is None:
return
dialog = SetComment(self.workspace, comment_addr, parent=self)
dialog.exec_()
def popup_newstate_dialog(self, async_=True):
addr = self._address_in_selection()
if addr is None:
return
dialog = NewState(self.workspace.instance, addr=addr, create_simgr=True, parent=self)
if async_:
dialog.show()
else:
dialog.exec_()
def popup_dependson_dialog(self, addr: Optional[int]=None, use_operand=False, func: bool=False,
async_=True):
if use_operand:
r = self._flow_graph.get_selected_operand_info()
if r is not None:
_, addr, operand = r
else:
                QMessageBox(QMessageBox.Critical,
                            "No operand",
                            "Please select an operand first.",
                            QMessageBox.Ok,
                            parent=self).exec_()
return
else:
if addr is None:
raise ValueError("No address is provided.") # this is a programming error
operand = None
if func:
# attempt to pass in a function
try:
the_func = self.workspace.instance.kb.functions.get_by_addr(addr)
except KeyError:
the_func = None
else:
the_func = None
dependson = DependsOn(addr, operand, func=the_func, parent=self)
dependson.exec_()
if dependson.location is not None:
if dependson.arg is not None:
# track function argument
self.workspace._main_window.run_dependency_analysis(
func_addr=addr,
func_arg_idx=dependson.arg,
)
def parse_operand_and_popup_xref_dialog(self, ins_addr, operand, async_=True):
if operand is not None:
if operand.variable is not None:
# Display cross references to this variable
self.popup_xref_dialog(addr=ins_addr, variable=operand.variable, async_=async_)
elif operand.is_constant:
# Display cross references to an address
self.popup_xref_dialog(addr=ins_addr, dst_addr=operand.constant_value, async_=async_)
elif operand.is_constant_memory:
# Display cross references to an address
self.popup_xref_dialog(addr=ins_addr, dst_addr=operand.constant_memory_value, async_=async_)
def popup_xref_dialog(self, addr=None, variable=None, dst_addr=None, async_=True):
if variable is not None:
dialog = XRef(addr=addr, variable_manager=self.variable_manager, variable=variable,
instance=self.workspace.instance, parent=self)
else:
dialog = XRef(addr=addr, xrefs_manager=self.workspace.instance.project.kb.xrefs, dst_addr=dst_addr,
instance=self.workspace.instance, parent=self)
if async_:
dialog.show()
else:
dialog.exec_()
#
# Public methods
#
def toggle_disasm_view(self):
if self._flow_graph.isHidden():
# Show flow graph
self.display_disasm_graph()
else:
# Show linear viewer
self.display_linear_viewer()
def display_disasm_graph(self):
self._linear_viewer.hide()
self._flow_graph.show()
if self.infodock.selected_insns:
# display the currently selected instruction
self._jump_to(next(iter(self.infodock.selected_insns)))
elif self._current_function.am_obj is not None:
self._flow_graph.show_instruction(self._current_function.addr)
self._flow_graph.setFocus()
def display_linear_viewer(self):
self._flow_graph.hide()
self._linear_viewer.show()
if self.infodock.selected_insns:
# display the currently selected instruction
self._linear_viewer.show_instruction(next(iter(self.infodock.selected_insns)))
elif self._current_function.am_obj is not None:
self._linear_viewer.show_instruction(self._current_function.addr)
self._linear_viewer.setFocus()
def display_function(self, function):
self._jump_history.jump_to(function.addr)
self._display_function(function)
def decompile_current_function(self):
if self._current_function.am_obj is not None:
try:
curr_ins = next(iter(self.infodock.selected_insns))
except StopIteration:
curr_ins = None
self.workspace.decompile_function(self._current_function.am_obj, curr_ins=curr_ins)
def toggle_smart_highlighting(self, enabled):
"""
Toggle between the smart highlighting mode and the text-based highlighting mode.
:param bool enabled: Enable smart highlighting.
:return: None
"""
self.infodock.smart_highlighting = enabled
self._flow_graph.refresh()
self._linear_viewer.refresh()
def toggle_show_address(self, show_address):
"""
Toggle whether addresses are shown on disassembly graph.
:param bool show_address: Whether the address should be shown or not.
:return: None
"""
self._show_address = show_address
self.current_graph.refresh()
def toggle_show_variable(self, show_variable):
"""
Toggle whether variables are shown on disassembly graph.
:param bool show_variable: Whether the variable should be shown or not.
:return: None
"""
self._show_variable = show_variable
self.current_graph.refresh()
def toggle_show_variable_identifier(self, show_ident):
"""
Toggle whether variable identifiers are shown on disassembly graph.
:param bool show_ident: Whether variable identifiers should be shown or not.
:return: None
"""
self._show_variable_ident = show_ident
self.current_graph.refresh()
def toggle_show_exception_edges(self, show_exception_edges):
"""
Toggle whether exception edges and the nodes that are only reachable through exception edges should be shown
or not.
:param bool show_exception_edges: Whether exception edges should be shown or not.
:return: None
"""
if show_exception_edges != self._show_exception_edges:
self._show_exception_edges = show_exception_edges
# reset the function graph
if self._flow_graph.function_graph is not None:
self._flow_graph.function_graph.exception_edges = show_exception_edges
self._flow_graph.function_graph.clear_cache()
self._flow_graph.reload()
def jump_to(self, addr, src_ins_addr=None):
# Record the current instruction address first
if src_ins_addr is not None:
self._jump_history.record_address(src_ins_addr)
self._jump_history.jump_to(addr)
self._jump_to(addr)
return True
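    # Illustrative usage sketch (not part of the original file): programmatic
    # navigation from another view or plugin. The `disasm_view` name is
    # hypothetical; jump_to() records the jump in the history, so a later
    # jump_back() returns to the previous location.
    #
    #   disasm_view.jump_to(0x401000, src_ins_addr=0x400f80)
    #   ...
    #   disasm_view.jump_back()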
def jump_back(self):
addr = self._jump_history.backtrack()
if addr is not None:
self._jump_to(addr)
def jump_forward(self):
addr = self._jump_history.forwardstep()
if addr is not None:
self._jump_to(addr)
def select_label(self, label_addr):
self.infodock.select_label(label_addr)
def rename_label(self, addr, new_name):
if self._flow_graph.disasm is not None:
is_renaming = False
kb = self._flow_graph.disasm.kb
if new_name == '':
if addr in kb.labels:
del kb.labels[addr]
else:
if addr in kb.labels:
is_renaming = True
kb.labels[addr] = new_name
# callback first
if self.workspace.instance.label_rename_callback:
self.workspace.instance.label_rename_callback(addr=addr, new_name=new_name)
# redraw the current block
self._flow_graph.update_label(addr, is_renaming=is_renaming)
def avoid_addr_in_exec(self, addr):
self.workspace.view_manager.first_view_in_category('symexec').avoid_addr_in_exec(addr)
def sizeHint(self):
return QSize(800, 800)
def run_induction_variable_analysis(self):
if self._flow_graph.induction_variable_analysis:
self._flow_graph.induction_variable_analysis = None
else:
ana = self.workspace.instance.project.analyses.AffineRelationAnalysis(self._flow_graph._function_graph.function)
self._flow_graph.induction_variable_analysis = ana
self._flow_graph.refresh()
#
# Initialization
#
def _init_widgets(self):
self._linear_viewer = QLinearDisassembly(self.workspace, self, parent=self)
self._flow_graph = QDisassemblyGraph(self.workspace, self, parent=self)
self._feature_map = QFeatureMap(self, parent=self)
self._statusbar = QDisasmStatusBar(self, parent=self)
vlayout = QVBoxLayout()
vlayout.addWidget(self._feature_map)
vlayout.addWidget(self._flow_graph)
vlayout.addWidget(self._linear_viewer)
vlayout.addWidget(self._statusbar)
vlayout.setContentsMargins(0, 0, 0, 0)
self._feature_map.setMaximumHeight(25)
vlayout.setStretchFactor(self._feature_map, 0)
vlayout.setStretchFactor(self._flow_graph, 1)
vlayout.setStretchFactor(self._linear_viewer, 1)
vlayout.setStretchFactor(self._statusbar, 0)
hlayout = QHBoxLayout()
hlayout.addLayout(vlayout)
self.setLayout(hlayout)
self.display_disasm_graph()
# self.display_linear_viewer()
self.workspace.plugins.instrument_disassembly_view(self)
def _init_menus(self):
self._insn_menu = DisasmInsnContextMenu(self)
self._label_menu = DisasmLabelContextMenu(self)
def _register_events(self):
# redraw the current graph if instruction/operand selection changes
self.infodock.selected_insns.am_subscribe(self.redraw_current_graph)
self.infodock.selected_operands.am_subscribe(self.redraw_current_graph)
self.infodock.selected_blocks.am_subscribe(self.redraw_current_graph)
self.infodock.hovered_block.am_subscribe(self.redraw_current_graph)
self.infodock.hovered_edge.am_subscribe(self.redraw_current_graph)
self.infodock.selected_labels.am_subscribe(self.redraw_current_graph)
self._feature_map.addr.am_subscribe(lambda: self._jump_to(self._feature_map.addr.am_obj))
self.workspace.current_screen.am_subscribe(self.on_screen_changed)
#
# Private methods
#
def _display_function(self, the_func):
self._current_function.am_obj = the_func
self._current_function.am_event()
# set status bar
self._statusbar.function = the_func
# variable recovery
variable_manager = self.workspace.instance.project.kb.variables
self.variable_manager = variable_manager
self.infodock.variable_manager = variable_manager
# clear existing selected instructions and operands
self.infodock.clear_selection()
if self._flow_graph.isVisible():
if self._flow_graph.function_graph is None or self._flow_graph.function_graph.function is not the_func:
# set function graph of a new function
self._flow_graph.function_graph = FunctionGraph(function=the_func,
exception_edges=self.show_exception_edges,
)
elif self._linear_viewer.isVisible():
self._linear_viewer.navigate_to_addr(the_func.addr)
self.workspace.view_manager.first_view_in_category('console').push_namespace({
'func': the_func,
'function_': the_func,
})
def _jump_to(self, addr):
function = locate_function(self.workspace.instance, addr)
if function is not None:
self._display_function(function)
instr_addr = function.addr_to_instruction_addr(addr)
if instr_addr is None:
instr_addr = addr
self.infodock.select_instruction(instr_addr, unique=True)
return True
# it does not belong to any function - we need to switch to linear view mode
if self.current_graph is not self._linear_viewer:
self.display_linear_viewer()
self._linear_viewer.navigate_to_addr(addr)
return True
#
# Utils
#
def _address_in_selection(self):
if self._insn_addr_on_context_menu is not None:
return self._insn_addr_on_context_menu
elif len(self.infodock.selected_insns) == 1:
return next(iter(self.infodock.selected_insns))
else:
return None
| 33.600939 | 124 | 0.645662 |
c61795106f044b30258d1de96e78591da9525122
| 653 |
py
|
Python
|
Atividade 01/sem-07-T1-Q3.py
|
daianasousa/Atividade-Remota-Semana-07
|
1c4a28bf052057e921730ba79dfb0cdaa74576e0
|
[
"MIT"
] | null | null | null |
Atividade 01/sem-07-T1-Q3.py
|
daianasousa/Atividade-Remota-Semana-07
|
1c4a28bf052057e921730ba79dfb0cdaa74576e0
|
[
"MIT"
] | null | null | null |
Atividade 01/sem-07-T1-Q3.py
|
daianasousa/Atividade-Remota-Semana-07
|
1c4a28bf052057e921730ba79dfb0cdaa74576e0
|
[
"MIT"
] | null | null | null |
def main():
tempo = 1
taxa_A = (2 / 100)
taxa_B = (3 / 100)
p_1 = int(input('Digite a população do país: '))
p_2 = int(input('Digite a população do país: '))
if p_1 > p_2:
pais_A = p_1
pais_B = p_2
elif p_2 > p_1:
pais_A = p_2
pais_B = p_1
    # Projected populations after the first year (the loop below updates
    # pais_A and pais_B directly).
    populacao_A = (pais_A + (pais_A * taxa_A))
    populacao_B = (pais_B + (pais_B * taxa_B))
while True:
if pais_A > pais_B:
pais_A += (pais_A * taxa_A)
pais_B += (pais_B * taxa_B)
tempo += 1
elif pais_A < pais_B:
print(tempo)
break
if __name__=='__main__':
main()
| 22.517241 | 52 | 0.502297 |
7ea2180d0894f3578696d3a48a79fc300c7fbbe5
| 49,918 |
py
|
Python
|
nums/numpy/api.py
|
yangkevin2/nums
|
fbd8f680b8e4b292d18bd5fa1b49e3cd216f9d0f
|
[
"Apache-2.0"
] | null | null | null |
nums/numpy/api.py
|
yangkevin2/nums
|
fbd8f680b8e4b292d18bd5fa1b49e3cd216f9d0f
|
[
"Apache-2.0"
] | null | null | null |
nums/numpy/api.py
|
yangkevin2/nums
|
fbd8f680b8e4b292d18bd5fa1b49e3cd216f9d0f
|
[
"Apache-2.0"
] | 1 |
2021-06-22T21:11:25.000Z
|
2021-06-22T21:11:25.000Z
|
# coding=utf-8
# Copyright (C) 2020 NumS Development Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import warnings
import numpy as np
import scipy.stats
from nums.core.application_manager import instance as _instance
from nums.core.array.blockarray import BlockArray
from nums.numpy import numpy_utils
# pylint: disable = redefined-builtin, too-many-lines
def _not_implemented(func):
# From project JAX: https://github.com/google/jax/blob/master/jax/numpy/lax_numpy.py
def wrapped(*args, **kwargs):
# pylint: disable=unused-argument
msg = "NumPy function {} not yet implemented."
raise NotImplementedError(msg.format(func))
return wrapped
# TODO (mwe): Convert this to invoke the NumPy op on a worker instead of the driver.
def _default_to_numpy(func):
def wrapped(*args, **kwargs):
warnings.warn("Operation " + func.__name__ + " not implemented, falling back to NumPy. " +
"If this is too slow or failing, please open an issue on GitHub.",
RuntimeWarning)
new_args = [arg.get() if isinstance(arg, BlockArray) else arg for arg in args]
new_kwargs = {k: v.get() if isinstance(v, BlockArray) else v
for k, v in zip(kwargs.keys(), kwargs.values())}
res = np.__getattribute__(func.__name__)(*new_args, **new_kwargs)
if isinstance(res, tuple):
nps_res = tuple(array(x) for x in res)
else:
nps_res = array(res)
return nps_res
return wrapped
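# Illustrative usage sketch (not part of the original file): the fallback
# decorator above dispatches on the *name* of the wrapped function, so it is
# applied to stubs whose names match a NumPy function. `median` is used here
# purely as a hypothetical example.
#
#   @_default_to_numpy
#   def median(a, axis=None):
#       ...
#
# Calling median(block_array) would then warn, run np.median on the driver,
# and wrap the result back into a BlockArray.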
############################################
# Constants
############################################
# Distributed memory access of these values will be optimized downstream.
pi = np.pi
e = np.e
euler_gamma = np.euler_gamma
inf = infty = Inf = Infinity = PINF = np.inf
NINF = np.NINF
PZERO = np.PZERO
NZERO = np.NZERO
nan = NAN = NaN = np.nan
############################################
# Data Types
############################################
bool_ = np.bool_
uint = np.uint
uint8 = np.uint8
uint16 = np.uint16
uint32 = np.uint32
uint64 = np.uint64
int8 = np.int8
int16 = np.int16
int32 = np.int32
int64 = np.int64
float16 = np.float16
float32 = np.float32
float64 = np.float64
float128 = np.float128
complex64 = np.complex64
complex128 = np.complex128
############################################
# Creation and I/O Ops
############################################
def loadtxt(fname, dtype=float, comments='# ', delimiter=' ',
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding='bytes', max_rows=None) -> BlockArray:
app = _instance()
num_rows = app.cm.num_cores_total()
try:
ba: BlockArray = app.loadtxt(
fname, dtype=dtype, comments=comments, delimiter=delimiter,
converters=converters, skiprows=skiprows,
usecols=usecols, unpack=unpack, ndmin=ndmin,
encoding=encoding, max_rows=max_rows, num_workers=num_rows)
shape = ba.shape
block_shape = app.compute_block_shape(shape, dtype)
return ba.reshape(block_shape=block_shape)
except Exception as _:
warnings.warn("Failed to load text data in parallel; using np.loadtxt locally.")
np_arr = np.loadtxt(fname, dtype=dtype, comments=comments, delimiter=delimiter,
converters=converters, skiprows=skiprows,
usecols=usecols, unpack=unpack, ndmin=ndmin,
encoding=encoding, max_rows=max_rows)
shape = np_arr.shape
block_shape = app.compute_block_shape(shape, dtype)
return app.array(np_arr, block_shape=block_shape)
def array(object, dtype=None, copy=True, order="K", ndmin=0, subok=False) -> BlockArray:
if order is not None and order != "K":
raise NotImplementedError("Only order='K' is supported.")
if ndmin != 0:
raise NotImplementedError("Only ndmin=0 is currently supported.")
if subok:
raise ValueError("subok must be False.")
if isinstance(object, BlockArray):
if copy:
object = object.copy()
if dtype is not None:
if dtype is not object.dtype:
object = object.astype(dtype)
return object
result = np.array(object, dtype=dtype, copy=copy,
order=order, ndmin=ndmin, subok=subok)
dtype = np.__getattribute__(str(result.dtype))
shape = result.shape
app = _instance()
block_shape = app.compute_block_shape(shape, dtype)
return app.array(result, block_shape)
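# Illustrative usage sketch (not part of the original file): array() routes a
# NumPy-convertible object through np.array and then distributes it with a
# block shape chosen by the application instance.
#
#   x = array([[1.0, 2.0], [3.0, 4.0]])
#   x.shape        # (2, 2)
#   x.dtype        # float64
#   x.get()        # plain numpy.ndarray with the same contents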
def empty(shape, dtype=float):
app = _instance()
if isinstance(shape, int):
shape = (shape,)
block_shape = app.compute_block_shape(shape, dtype)
return app.empty(shape=shape, block_shape=block_shape, dtype=dtype)
def zeros(shape, dtype=float):
app = _instance()
if isinstance(shape, int):
shape = (shape,)
block_shape = app.get_block_shape(shape, dtype)
return app.zeros(shape=shape, block_shape=block_shape, dtype=dtype)
def ones(shape, dtype=float):
app = _instance()
if isinstance(shape, int):
shape = (shape,)
block_shape = app.get_block_shape(shape, dtype)
return app.ones(shape=shape, block_shape=block_shape, dtype=dtype)
def empty_like(prototype: BlockArray, dtype=None, order='K', shape=None):
if shape is None:
shape = prototype.shape
if dtype is None:
dtype = prototype.dtype
if order is not None and order != "K":
raise NotImplementedError("Only order='K' is supported.")
return empty(shape, dtype)
def zeros_like(prototype, dtype=None, order='K', shape=None):
if shape is None:
shape = prototype.shape
if dtype is None:
dtype = prototype.dtype
if order is not None and order != "K":
raise NotImplementedError("Only order='K' is supported.")
return zeros(shape, dtype)
def ones_like(prototype, dtype=None, order='K', shape=None):
if shape is None:
shape = prototype.shape
if dtype is None:
dtype = prototype.dtype
if order is not None and order != "K":
raise NotImplementedError("Only order='K' is supported.")
return ones(shape, dtype)
def concatenate(arrays, axis=0, out=None):
if out is not None:
raise NotImplementedError("out is currently not supported for concatenate.")
# Pick the mode along specified axis.
axis_block_size = scipy.stats.mode(list(map(
lambda arr: arr.block_shape[axis], arrays
))).mode.item()
return _instance().concatenate(arrays, axis=axis, axis_block_size=axis_block_size)
def split(ary: BlockArray, indices_or_sections, axis=0):
if not isinstance(indices_or_sections, int):
raise NotImplementedError("Split currently supports integers only.")
dim_total = ary.shape[axis]
    # Splits into N equal arrays, and raises if this is not possible.
if dim_total % indices_or_sections != 0:
raise ValueError("ary axis %s cannot be split into %s equal arrays." % (
axis,
indices_or_sections))
dim_partial = dim_total // indices_or_sections
results = []
ss_op = [slice(None, None, 1) for _ in ary.shape]
for i in range(0, dim_total, dim_partial):
start = i
stop = i + dim_partial
ss_op[axis] = slice(start, stop, 1)
ary_part = ary[tuple(ss_op)]
results.append(ary_part)
return tuple(results)
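# Illustrative usage sketch (not part of the original file): split() only
# supports an integer section count that divides the axis evenly, and returns
# a tuple of BlockArrays produced by basic slicing; concatenate() stacks them
# back together.
#
#   x = reshape(arange(12), (6, 2))
#   top, bottom = split(x, 2, axis=0)        # two (3, 2) BlockArrays
#   y = concatenate([top, bottom], axis=0)   # same contents as x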
def identity(n: int, dtype=float) -> BlockArray:
return eye(n, n, dtype=dtype)
def eye(N, M=None, k=0, dtype=float):
app = _instance()
if k != 0:
raise NotImplementedError("Only k==0 is currently supported.")
if M is None:
M = N
shape = (N, M)
block_shape = app.get_block_shape(shape, dtype)
return app.eye(shape, block_shape, dtype)
def diag(v: BlockArray, k=0) -> BlockArray:
app = _instance()
if k != 0:
raise NotImplementedError("Only k==0 is currently supported.")
return app.diag(v)
def trace(a: BlockArray, offset=0, axis1=0, axis2=1, dtype=None, out=None):
if offset != 0:
raise NotImplementedError("offset != 0 is currently not supported.")
if out is not None:
raise NotImplementedError("out is currently not supported.")
if axis1 != 0 or axis2 != 1:
raise NotImplementedError(" axis1 != 0 or axis2 != 1 is currently not supported.")
return sum(diag(a, offset), dtype=dtype, out=out)
def atleast_1d(*arys):
return _instance().atleast_1d(*arys)
def atleast_2d(*arys):
return _instance().atleast_2d(*arys)
def atleast_3d(*arys):
return _instance().atleast_3d(*arys)
############################################
# Manipulation Ops
############################################
def arange(start=None, stop=None, step=1, dtype=None) -> BlockArray:
if start is None:
raise TypeError("Missing required argument start")
if stop is None:
stop = start
start = 0
if step != 1:
raise NotImplementedError("Only step size of 1 is currently supported.")
if dtype is None:
dtype = np.__getattribute__(str(np.result_type(start, stop)))
shape = (int(np.ceil(stop - start)),)
app = _instance()
block_shape = app.get_block_shape(shape, dtype)
return app.arange(start, shape, block_shape, step, dtype)
def linspace(start, stop, num=50, endpoint=True, retstep=False, dtype=None, axis=0):
shape = (num,)
dtype = np.float64 if dtype is None else dtype
app = _instance()
block_shape = app.get_block_shape(shape, dtype)
return app.linspace(start, stop, shape, block_shape, endpoint, retstep, dtype, axis)
def logspace(start, stop, num=50, endpoint=True, base=10.0, dtype=None, axis=0):
app = _instance()
ba: BlockArray = linspace(start, stop, num, endpoint, dtype=None, axis=axis)
ba = power(app.scalar(base), ba)
if dtype is not None and dtype != ba.dtype:
ba = ba.astype(dtype)
return ba
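# Illustrative usage sketch (not part of the original file): linspace and
# logspace mirror their NumPy counterparts; logspace is built on top of
# linspace and power, so logspace(0, 3, num=4) yields 1, 10, 100, 1000.
#
#   xs = linspace(0.0, 1.0, num=5)          # 0.0, 0.25, 0.5, 0.75, 1.0
#   ys = logspace(0, 3, num=4, base=10.0)   # 1, 10, 100, 1000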
############################################
# Linear Algebra Ops
############################################
def tensordot(x1: BlockArray, x2: BlockArray, axes=2) -> BlockArray:
return _instance().tensordot(arr_1=x1,
arr_2=x2,
axes=axes)
def matmul(x1: BlockArray, x2: BlockArray) -> BlockArray:
return _instance().matmul(arr_1=x1,
arr_2=x2)
def inner(a: BlockArray, b: BlockArray):
assert len(a.shape) == len(b.shape) == 1, "Only single-axis inputs supported."
return a.T @ b
def outer(a: BlockArray, b: BlockArray):
assert len(a.shape) == len(b.shape) == 1, "Only single-axis inputs supported."
return a.reshape((a.shape[0], 1)) @ b.reshape((1, b.shape[0]))
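# Illustrative usage sketch (not part of the original file): inner() and
# outer() above only accept one-dimensional inputs; inner is a dot product,
# outer reshapes its operands into a column and a row before multiplying.
#
#   a = array([1.0, 2.0, 3.0])
#   b = array([4.0, 5.0, 6.0])
#   inner(a, b)    # scalar-like BlockArray: 32.0
#   outer(a, b)    # (3, 3) BlockArray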
############################################
# Shape Ops
############################################
def shape(a: BlockArray):
return a.shape
def size(a: BlockArray):
return a.size
def ndim(a: BlockArray):
return a.ndim
def reshape(a: BlockArray, shape):
block_shape = _instance().compute_block_shape(shape, a.dtype)
return a.reshape(shape, block_shape=block_shape)
def expand_dims(a: BlockArray, axis):
return a.expand_dims(axis)
def squeeze(a: BlockArray, axis=None):
assert axis is None, "axis not supported."
return a.squeeze()
def swapaxes(a: BlockArray, axis1: int, axis2: int):
return a.swapaxes(axis1, axis2)
def transpose(a: BlockArray, axes=None):
assert axes is None, "axes not supported."
return a.T
############################################
# Misc
############################################
def copy(a: BlockArray, order='K', subok=False):
assert order == 'K' and not subok, "Only default args supported."
return a.copy()
############################################
# Reduction Ops
############################################
def min(a: BlockArray, axis=None, out=None,
keepdims=False, initial=None, where=None) -> BlockArray:
if initial is not None:
raise NotImplementedError("'initial' is currently not supported.")
if where is not None:
raise NotImplementedError("'where' is currently not supported.")
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().min(a, axis=axis, keepdims=keepdims)
amin = min
def max(a: BlockArray, axis=None, out=None,
keepdims=False, initial=None, where=None) -> BlockArray:
if initial is not None:
raise NotImplementedError("'initial' is currently not supported.")
if where is not None:
raise NotImplementedError("'where' is currently not supported.")
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().max(a, axis=axis, keepdims=keepdims)
amax = max
def argmin(a: BlockArray, axis=None, out=None):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().argop("argmin", a, axis=axis)
def argmax(a: BlockArray, axis=None, out=None):
if len(a.shape) > 1:
raise NotImplementedError("argmax currently only supports one-dimensional arrays.")
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().argop("argmax", a, axis=axis)
def sum(a: BlockArray, axis=None, dtype=None, out=None,
keepdims=False, initial=None, where=None) -> BlockArray:
if initial is not None:
raise NotImplementedError("'initial' is currently not supported.")
if where is not None:
raise NotImplementedError("'where' is currently not supported.")
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().sum(a, axis=axis, keepdims=keepdims, dtype=dtype)
def mean(a: BlockArray, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().mean(a, axis=axis, keepdims=keepdims, dtype=dtype)
def var(a: BlockArray, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().var(a, axis=axis, ddof=ddof, keepdims=keepdims, dtype=dtype)
def std(a: BlockArray, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().std(a, axis=axis, ddof=ddof, keepdims=keepdims, dtype=dtype)
def where(condition: BlockArray, x: BlockArray = None, y: BlockArray = None):
return _instance().where(condition, x, y)
def all(a: BlockArray, axis=None, out=None, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().reduce("all", a, axis=axis, keepdims=keepdims)
def alltrue(a: BlockArray, axis=None, out=None, dtype=None, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().reduce("alltrue", a, axis=axis, keepdims=keepdims, dtype=dtype)
def any(a: BlockArray, axis=None, out=None, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().reduce("any", a, axis=axis, keepdims=keepdims)
############################################
# NaN Ops
############################################
def nanmax(a: BlockArray, axis=None, out=None, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().reduce("nanmax", a, axis=axis, keepdims=keepdims)
def nanmin(a: BlockArray, axis=None, out=None, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().reduce("nanmin", a, axis=axis, keepdims=keepdims)
def nansum(a: BlockArray, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().reduce("nansum", a, axis=axis, dtype=dtype, keepdims=keepdims)
def nanmean(a: BlockArray, axis=None, dtype=None, out=None, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().nanmean(a, axis=axis, dtype=dtype, keepdims=keepdims)
def nanvar(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().nanvar(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims)
def nanstd(a, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
if out is not None:
raise NotImplementedError("'out' is currently not supported.")
return _instance().nanstd(a, axis=axis, dtype=dtype, ddof=ddof, keepdims=keepdims)
############################################
# Utility Ops
############################################
def array_equal(a: BlockArray, b: BlockArray, equal_nan=False) -> BlockArray:
if equal_nan is not False:
raise NotImplementedError("equal_nan=True not supported.")
return _instance().array_equal(a, b)
def array_equiv(a: BlockArray, b: BlockArray) -> BlockArray:
return _instance().array_equiv(a, b)
def allclose(a: BlockArray, b: BlockArray, rtol=1.e-5, atol=1.e-8, equal_nan=False) -> BlockArray:
if equal_nan is not False:
raise NotImplementedError("equal_nan=True not supported.")
return _instance().allclose(a, b, rtol, atol)
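# Illustrative usage sketch (not part of the original file): the comparison
# helpers above return BlockArrays, so .get() is needed to obtain a plain
# result on the driver.
#
#   same = array_equal(array([1, 2, 3]), array([1, 2, 3]))
#   same.get()     # numpy bool: True
#   close = allclose(array([1.0, 2.0]), array([1.0, 2.0 + 1e-9]))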
############################################
# Generated Ops (Unary, Binary)
############################################
def abs(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="abs",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def absolute(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="absolute",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def arccos(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="arccos",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def arccosh(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="arccosh",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def arcsin(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="arcsin",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def arcsinh(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="arcsinh",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def arctan(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="arctan",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def arctanh(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="arctanh",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def bitwise_not(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="bitwise_not",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def cbrt(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="cbrt",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def ceil(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="ceil",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def conj(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="conj",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def conjugate(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="conjugate",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def cos(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="cos",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def cosh(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="cosh",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def deg2rad(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="deg2rad",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def degrees(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="degrees",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def exp(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="exp",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def exp2(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="exp2",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def expm1(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="expm1",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def fabs(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="fabs",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def floor(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="floor",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def invert(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="invert",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def isfinite(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="isfinite",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def isinf(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="isinf",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def isnan(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="isnan",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def log(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="log",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def log10(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="log10",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def log1p(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="log1p",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def log2(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="log2",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def logical_not(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="logical_not",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def negative(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="negative",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def positive(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="positive",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def rad2deg(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="rad2deg",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def radians(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="radians",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def reciprocal(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="reciprocal",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def rint(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="rint",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def sign(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="sign",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def signbit(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="signbit",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def sin(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="sin",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def sinh(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="sinh",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def spacing(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="spacing",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def sqrt(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="sqrt",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def square(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="square",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def tan(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="tan",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def tanh(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="tanh",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def trunc(x: BlockArray, out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_uop(op_name="trunc",
arr=x,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def add(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="add",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def arctan2(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="arctan2",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def bitwise_and(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="bitwise_and",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def bitwise_or(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="bitwise_or",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def bitwise_xor(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="bitwise_xor",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def copysign(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="copysign",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def divide(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="divide",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def equal(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="equal",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def float_power(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="float_power",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def floor_divide(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="floor_divide",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def fmax(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="fmax",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def fmin(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="fmin",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def fmod(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="fmod",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def gcd(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="gcd",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def greater(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="greater",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def greater_equal(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="greater_equal",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def heaviside(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="heaviside",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def hypot(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="hypot",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def lcm(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="lcm",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def ldexp(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="ldexp",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def left_shift(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="left_shift",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def less(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="less",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def less_equal(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="less_equal",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def logaddexp(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="logaddexp",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def logaddexp2(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="logaddexp2",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def logical_and(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="logical_and",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def logical_or(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="logical_or",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def logical_xor(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="logical_xor",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def maximum(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="maximum",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def minimum(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="minimum",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def mod(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="mod",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def multiply(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="multiply",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def nextafter(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="nextafter",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def not_equal(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="not_equal",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def power(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="power",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def remainder(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="remainder",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def right_shift(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="right_shift",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def subtract(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="subtract",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
def true_divide(x1: BlockArray, x2: BlockArray,
out: BlockArray = None, where=True, **kwargs) -> BlockArray:
return _instance().map_bop(op_name="true_divide",
arr_1=x1,
arr_2=x2,
out=out,
where=where,
kwargs=numpy_utils.ufunc_kwargs(kwargs))
| 37.673962 | 98 | 0.516026 |
68c02fdc8fa5307347e604d9debceb6bae7717fc
| 4,172 |
py
|
Python
|
api/serializers.py
|
chop-dbhi/biorepo-portal
|
7db13c40b2b9d62af43a28e4af08c2472b98fc96
|
[
"BSD-2-Clause"
] | 6 |
2016-10-26T19:51:11.000Z
|
2021-03-18T16:05:55.000Z
|
api/serializers.py
|
chop-dbhi/biorepo-portal
|
7db13c40b2b9d62af43a28e4af08c2472b98fc96
|
[
"BSD-2-Clause"
] | 207 |
2015-09-24T17:41:37.000Z
|
2021-05-18T18:14:08.000Z
|
api/serializers.py
|
chop-dbhi/biorepo-portal
|
7db13c40b2b9d62af43a28e4af08c2472b98fc96
|
[
"BSD-2-Clause"
] | 8 |
2016-04-27T19:04:50.000Z
|
2020-08-24T02:33:05.000Z
|
from django.contrib.auth.models import User, Group
from rest_framework import serializers
from api.models.protocols import Organization, DataSource, Protocol,\
ProtocolDataSource, ProtocolDataSourceLink, ProtocolUser,\
ProtocolUserCredentials
class UserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = User
fields = ('url', 'username', 'email', 'groups', 'first_name', 'last_name')
class GroupSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Group
fields = ('url',)
class OrganizationSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = Organization
fields = ('id', 'name', 'subject_id_label')
class DataSourceSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = DataSource
fields = ('id', 'name', 'url', 'desc_help', 'description', 'ehb_service_es_id')
class ProtocolSerializer(serializers.HyperlinkedModelSerializer):
protocol_data_sources = serializers.HyperlinkedIdentityField(view_name='protocol-datasources-list')
subjects = serializers.HyperlinkedIdentityField(view_name='protocol-subject-list')
organizations = serializers.HyperlinkedIdentityField(view_name='protocol-organization-list')
class Meta:
model = Protocol
fields = ('id', 'name', 'users', 'data_sources', 'protocol_data_sources', 'subjects', 'organizations')
class ProtocolDataSourceSerializer(serializers.HyperlinkedModelSerializer):
subjects = serializers.HyperlinkedIdentityField(view_name='pds-subject-list')
class Meta:
model = ProtocolDataSource
fields = ('id', 'protocol', 'data_source', 'path', 'driver', 'driver_configuration',
'display_label', 'max_records_per_subject', 'subjects')
class ProtocolDataSourceLinkSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProtocolDataSourceLink
class ProtocolUserSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProtocolUser
class ProtocolUserCredentialsSerializer(serializers.HyperlinkedModelSerializer):
class Meta:
model = ProtocolUserCredentials
class eHBOrganizationSerializer(serializers.Serializer):
"""
This serializer corresponds to the definition of an eHB Organization
see:
https://github.com/chop-dbhi/ehb-service/blob/master/ehb_service/apps/core/models/identities.py
and its requested representation:
see:
https://github.com/chop-dbhi/ehb-client/blob/master/ehb_client/requests/organization_request_handler.py
"""
id = serializers.IntegerField()
name = serializers.CharField(max_length=255)
subject_id_label = serializers.CharField(max_length=50)
created = serializers.DateTimeField()
modified = serializers.DateTimeField()
class eHBSubjectSerializer(serializers.Serializer):
"""
This serializer corresponds to the definition of an eHB subject
see: https://github.com/chop-dbhi/ehb-service/blob/master/ehb_service/apps/core/models/identities.py
and its requested representation:
see: https://github.com/chop-dbhi/ehb-client/blob/master/ehb_client/requests/subject_request_handler.py
"""
id = serializers.IntegerField()
first_name = serializers.CharField(max_length=50)
last_name = serializers.CharField(max_length=70)
# organization_id is PK for org in ehb-service
organization_id = serializers.IntegerField()
organization_subject_id = serializers.CharField(max_length=120)
organization_id_label = serializers.CharField(max_length=120)
dob = serializers.DateField()
modified = serializers.DateTimeField()
created = serializers.DateTimeField()
class eHBExternalRecordSerializer(serializers.Serializer):
record_id = serializers.CharField(max_length=120)
subject_id = serializers.IntegerField()
external_system_id = serializers.IntegerField()
modified = serializers.DateTimeField()
created = serializers.DateTimeField()
path = serializers.CharField(max_length=120)
id = serializers.IntegerField()
label_id = serializers.IntegerField()
| 35.65812 | 110 | 0.751198 |
3514fb375cf588ce1ca3d1f580a08c93313caa5c
| 568 |
py
|
Python
|
ABC/abc201-abc250/abc226/c/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 2 |
2020-06-12T09:54:23.000Z
|
2021-05-04T01:34:07.000Z
|
ABC/abc201-abc250/abc226/c/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | 961 |
2020-06-23T07:26:22.000Z
|
2022-03-31T21:34:52.000Z
|
ABC/abc201-abc250/abc226/c/main.py
|
KATO-Hiro/AtCoder
|
cbbdb18e95110b604728a54aed83a6ed6b993fde
|
[
"CC0-1.0"
] | null | null | null |
# -*- coding: utf-8 -*-
def main():
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n = int(input())
b = [list(map(int, input().split())) for _ in range(n)]
ans = list()
master = [False] * n
def dfs(m, ans):
if not master[m]:
master[m] = True
ans.append(b[m][0])
for aij in b[m][2:]:
if master[aij - 1]:
continue
dfs(aij - 1, ans)
dfs(n - 1, ans)
print(sum(ans))
if __name__ == "__main__":
main()
| 17.75 | 59 | 0.454225 |
88542708b09974d1b002314fbfe441303f0fca8a
| 3,512 |
py
|
Python
|
ucsmsdk/mometa/bios/BiosVfEnhancedIntelSpeedStepTech.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/bios/BiosVfEnhancedIntelSpeedStepTech.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
ucsmsdk/mometa/bios/BiosVfEnhancedIntelSpeedStepTech.py
|
anoop1984/python_sdk
|
c4a226bad5e10ad233eda62bc8f6d66a5a82b651
|
[
"Apache-2.0"
] | null | null | null |
"""This module contains the general information for BiosVfEnhancedIntelSpeedStepTech ManagedObject."""
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class BiosVfEnhancedIntelSpeedStepTechConsts():
SUPPORTED_BY_DEFAULT_NO = "no"
SUPPORTED_BY_DEFAULT_YES = "yes"
VP_ENHANCED_INTEL_SPEED_STEP_TECH_DISABLED = "disabled"
VP_ENHANCED_INTEL_SPEED_STEP_TECH_ENABLED = "enabled"
VP_ENHANCED_INTEL_SPEED_STEP_TECH_PLATFORM_DEFAULT = "platform-default"
VP_ENHANCED_INTEL_SPEED_STEP_TECH_PLATFORM_RECOMMENDED = "platform-recommended"
class BiosVfEnhancedIntelSpeedStepTech(ManagedObject):
"""This is BiosVfEnhancedIntelSpeedStepTech class."""
consts = BiosVfEnhancedIntelSpeedStepTechConsts()
naming_props = set([])
mo_meta = MoMeta("BiosVfEnhancedIntelSpeedStepTech", "biosVfEnhancedIntelSpeedStepTech", "Enhanced-Intel-SpeedStep-Tech", VersionMeta.Version111j, "InputOutput", 0x3f, [], ["admin", "ls-compute", "ls-config", "ls-server", "ls-server-policy", "pn-policy"], [u'biosSettings', u'biosVProfile'], [], ["Get", "Set"])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version111j, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
"prop_acl": MoPropertyMeta("prop_acl", "propAcl", "ulong", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version111j, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
"supported_by_default": MoPropertyMeta("supported_by_default", "supportedByDefault", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["no", "yes"], []),
"vp_enhanced_intel_speed_step_tech": MoPropertyMeta("vp_enhanced_intel_speed_step_tech", "vpEnhancedIntelSpeedStepTech", "string", VersionMeta.Version111j, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, ["disabled", "enabled", "platform-default", "platform-recommended"], []),
}
prop_map = {
"childAction": "child_action",
"dn": "dn",
"propAcl": "prop_acl",
"rn": "rn",
"sacl": "sacl",
"status": "status",
"supportedByDefault": "supported_by_default",
"vpEnhancedIntelSpeedStepTech": "vp_enhanced_intel_speed_step_tech",
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.child_action = None
self.prop_acl = None
self.sacl = None
self.status = None
self.supported_by_default = None
self.vp_enhanced_intel_speed_step_tech = None
ManagedObject.__init__(self, "BiosVfEnhancedIntelSpeedStepTech", parent_mo_or_dn, **kwargs)
| 59.525424 | 315 | 0.703588 |
44599aa1324bcae8bde4e518d5e63e291abebfc0
| 1,372 |
py
|
Python
|
python/dune/xt/common/test.py
|
ftschindler-work/dune-xt-common
|
1748530e13dbf683b5bf14289bf3e134485755a8
|
[
"BSD-2-Clause"
] | 2 |
2016-01-05T14:54:52.000Z
|
2020-02-08T04:09:13.000Z
|
python/dune/xt/common/test.py
|
ftschindler-work/dune-xt-common
|
1748530e13dbf683b5bf14289bf3e134485755a8
|
[
"BSD-2-Clause"
] | 119 |
2016-01-06T16:32:14.000Z
|
2020-03-25T08:28:53.000Z
|
python/dune/xt/common/test.py
|
ftschindler-work/dune-xt-common
|
1748530e13dbf683b5bf14289bf3e134485755a8
|
[
"BSD-2-Clause"
] | 5 |
2016-04-13T08:03:45.000Z
|
2020-03-13T10:59:17.000Z
|
# ~~~
# This file is part of the dune-xt-common project:
# https://github.com/dune-community/dune-xt-common
# Copyright 2009-2018 dune-xt-common developers and contributors. All rights reserved.
# License: Dual licensed as BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
# or GPL-2.0+ (http://opensource.org/licenses/gpl-license)
# with "runtime exception" (http://www.dune-project.org/license.html)
# Authors:
# René Fritze (2018)
# ~~~
from pkg_resources import resource_filename, resource_stream
import pkgutil
import logging
import pprint
from loguru import logger
def load_all_submodule(module):
ignore_playground = True
fails = []
for _, module_name, _ in pkgutil.walk_packages(module.__path__, module.__name__ + '.',
lambda n: fails.append((n, ''))):
if ignore_playground and 'playground' in module_name:
continue
try:
__import__(module_name, level=0)
except (TypeError, ImportError) as t:
fails.append((module_name, t))
if len(fails) > 0:
logging.getLogger(module.__name__).fatal('Failed imports: {}'.format(pprint.pformat(fails)))
raise ImportError(module.__name__)
def runmodule(filename):
import pytest
import sys
sys.exit(pytest.main(sys.argv[1:] + [filename]))
| 35.179487 | 100 | 0.663994 |
1dd6b5a8029d41c238008fc78b5ee7889301e2b9
| 17,428 |
py
|
Python
|
disfv1.py
|
ndf-zz/disfv1
|
dc47ab93ced580989e65fc0e9d1be4808c932070
|
[
"MIT"
] | 5 |
2019-09-18T08:57:24.000Z
|
2020-09-29T05:16:32.000Z
|
disfv1.py
|
ndf-zz/disfv1
|
dc47ab93ced580989e65fc0e9d1be4808c932070
|
[
"MIT"
] | 2 |
2020-12-16T18:33:27.000Z
|
2022-01-21T21:31:52.000Z
|
disfv1.py
|
ndf-zz/disfv1
|
dc47ab93ced580989e65fc0e9d1be4808c932070
|
[
"MIT"
] | 1 |
2019-11-26T09:40:07.000Z
|
2019-11-26T09:40:07.000Z
|
#
# disfv1: FV-1 Disassembler
# Copyright (C) 2019-2021 Nathan Fraser
#
# A disassembler for the Spin Semiconductor FV-1 DSP.
# Python2 > 2.6 support
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
# Imports
import argparse
import sys
import struct
from decimal import Decimal
# Constants
VERSION = '1.0.6'
PROGLEN = 128
# Bit Masks
M1 = 0x01
M2 = 0x03
M5 = 0x1f
M6 = 0x3f
M8 = 0xff
M9 = 0x1ff
M11 = 0x7ff
M14 = 0x3fff
M15 = 0x7fff
M16 = 0xffff
M24 = 0xffffff
M27 = 0x7ffffff
M32 = 0xffffffff
def quiet(msg):
pass
def warning(msg):
print(msg, file=sys.stderr)
def error(msg):
print(msg, file=sys.stderr)
# Machine instruction table
op_tbl = {
# opcode: [mnemonic, (arglen,left shift), ...]
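    # e.g. 0x04 RDAX: (M6,5) extracts a 6-bit register number from bits 10..5,
    # (M16,16) a 16-bit S1.14 coefficient from bits 31..16.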
0x00: ['RDA', (M15,5),(M11,21)],
0x01: ['RMPA', (M11,21)],
0x02: ['WRA', (M15,5),(M11,21)],
0x03: ['WRAP', (M15,5),(M11,21)],
0x04: ['RDAX', (M6,5),(M16,16)],
0x05: ['RDFX', (M6,5),(M16,16)], # and LDAX
0x06: ['WRAX', (M6,5),(M16,16)],
0x07: ['WRHX', (M6,5),(M16,16)],
0x08: ['WRLX', (M6,5),(M16,16)],
0x09: ['MAXX', (M6,5),(M16,16)], # and ABSA
0x0A: ['MULX', (M6,5)],
0x0B: ['LOG', (M16,16),(M11,5)],
0x0C: ['EXP', (M16,16),(M11,5)],
0x0D: ['SOF', (M16,16),(M11,5)],
0x0E: ['AND', (M24,8)], # and CLR
0x0F: ['OR', (M24,8)],
0x10: ['XOR', (M24,8)], # and NOT
0x11: ['SKP', (M5,27),(M6,21)], # and NOP
0x12: ['WLDX', (M32,0)], # WLDS/WLDR
0x13: ['JAM', (M2,6)],
0x14: ['CHO', (M2,30),(M2,21),(M6,24),(M16,5)],
'WLDS': ['WLDS', (M1,29),(M9,20),(M15,5)],
'WLDR': ['WLDR', (M2,29),(M16,13),(M2,5)],
}
class fv1deparse(object):
def __init__(self, source=None, relative=False,
nopraw=False, wfunc=None):
self.program = []
self.listing = ''
self.dowarn = wfunc
self.relskip = relative
self.nopraw = nopraw
self.source = source
self.jmptbl = { # jump table for skips
}
self.rampamp = {
0x0: '4096',
0x1: '2048',
0x2: '1024',
0x3: '512',
}
self.chotype = {
0x0: 'rda',
0x1: 'rda', # override invalid chotype
0x2: 'sof',
0x3: 'rdal',
}
self.chosel = {
0x0: 'SIN0',
0x1: 'SIN1',
0x2: 'RMP0',
0x3: 'RMP1',
}
self.choflags = {
0x00: 'SIN',
0x01: 'COS',
0x02: 'REG',
0x04: 'COMPC',
0x08: 'COMPA',
0x10: 'RPTR2',
0x20: 'NA',
}
self.skipflags = {
0x10: 'RUN',
0x08: 'ZRC',
0x04: 'ZRO',
0x02: 'GEZ',
0x01: 'NEG',
}
self.regs = {
0x00: 'SIN0_RATE',
0x01: 'SIN0_RANGE',
0x02: 'SIN1_RATE',
0x03: 'SIN1_RANGE',
0x04: 'RMP0_RATE',
0x05: 'RMP0_RANGE',
0x06: 'RMP1_RATE',
0x07: 'RMP1_RANGE',
0x10: 'POT0',
0x11: 'POT1',
0x12: 'POT2',
0x14: 'ADCL',
0x15: 'ADCR',
0x16: 'DACL',
0x17: 'DACR',
0x18: 'ADDR_PTR',
0x20: 'REG0',
0x21: 'REG1',
0x22: 'REG2',
0x23: 'REG3',
0x24: 'REG4',
0x25: 'REG5',
0x26: 'REG6',
0x27: 'REG7',
0x28: 'REG8',
0x29: 'REG9',
0x2a: 'REG10',
0x2b: 'REG11',
0x2c: 'REG12',
0x2d: 'REG13',
0x2e: 'REG14',
0x2f: 'REG15',
0x30: 'REG16',
0x31: 'REG17',
0x32: 'REG18',
0x33: 'REG19',
0x34: 'REG20',
0x35: 'REG21',
0x36: 'REG22',
0x37: 'REG23',
0x38: 'REG24',
0x39: 'REG25',
0x3a: 'REG26',
0x3b: 'REG27',
0x3c: 'REG28',
0x3d: 'REG29',
0x3e: 'REG30',
0x3f: 'REG31',
}
def __reg__(self, reg):
"""Convert a register argument to text."""
ret = '{0:#04x}'.format(reg)
if reg in self.regs:
ret = self.regs[reg]
return ret
def __s1_14__(self, val):
"""Convert and return a S1.14 real as text."""
return str(Decimal(((val&((1<<15)-1))-(val&(1<<15)))/(1<<14)))
def __s1_9__(self, val):
"""Convert and return a S1.9 real as text."""
return str(Decimal(((val&((1<<10)-1))-(val&(1<<10)))/(1<<9)))
def __s4_6__(self, val):
"""Convert and return a S4.6 real as text."""
return str(Decimal(((val&((1<<10)-1))-(val&(1<<10)))/(1<<6)))
def __s_10__(self, val):
"""Convert and return a S.10 real as text."""
return str(Decimal(((val&((1<<10)-1))-(val&(1<<10)))/(1<<10)))
def __i_15__(self, val):
"""Convert and return a signed integer as text."""
return str(Decimal((val&((1<<15)-1))-(val&(1<<15))))
def __s_15__(self, val):
"""Convert and return a S.15 real as text."""
return str(Decimal(((val&((1<<15)-1))-(val&(1<<15)))/(1<<15)))
def __s_23__(self, val):
"""Convert and return a S.23 real as text."""
return str(((val&((1<<23)-1))-(val&(1<<23)))/(1<<23))
def __regmult__(self, inst, address):
"""Extract a register/multiplier instruction: op REG,k"""
reg = inst['args'][0]
mult = inst['args'][1]
if inst['mnemonic'] == 'rdfx' and mult == 0:
inst['mnemonic'] = 'ldax'
inst['argstring'] = self.__reg__(reg)
inst['comment'] = 'reg:{0:#04x}'.format(reg)
elif inst['mnemonic'] == 'maxx' and mult == 0 and reg == 0:
inst['mnemonic'] = 'absa'
inst['comment'] = 'maxx 0,0'
else:
inst['comment'] = 'reg:{0:#04x} k:{1:#06x}'.format(reg, mult)
inst['argstring'] = ','.join([ self.__reg__(reg),
self.__s1_14__(mult) ])
def __cho__(self, inst, address):
"""Extract a CHO instruction."""
typeval = inst['args'][0]
typestr = str(typeval)
if typeval in self.chotype:
typestr = self.chotype[typeval]
sel = inst['args'][1]
selstr = str(sel)
if sel in self.chosel:
selstr = self.chosel[sel]
flags = inst['args'][2]
flagv = []
if flags == 0x00:
flagv.append('SIN')
for flag in sorted(self.choflags):
if flags&flag:
flagv.append(self.choflags[flag])
flagstr = '|'.join(flagv)
d = inst['args'][3]
dstr = None
if typestr == 'rdal':
inst['argstring'] = ','.join(['rdal',selstr,flagstr])
inst['comment'] = 't:{0:#03x} n:{1:#03x} c:{2:#04x}'.format(
typeval, sel, flags)
elif typestr == 'rda':
dstr = str(d)
inst['argstring'] = ','.join(['rda',selstr,flagstr,dstr])
inst['comment'] = 't:{0:#03x} n:{1:#03x} c:{2:#04x} addr:{3:#06x}'.format(
typeval, sel, flags, d)
elif typestr == 'sof':
dstr = self.__s_15__(d)
inst['argstring'] = ','.join(['sof',selstr,flagstr,dstr])
inst['comment'] = 't:{0:#03x} n:{1:#03x} c:{2:#04x} d:{3:#06x}'.format(
typeval, sel, flags, d)
else:
dstr = str(d)
inst['argstring'] = ','.join([typestr,selstr,flagstr,dstr])
inst['comment'] = 't:{0:#03x} n:{1:#03x} c:{2:#04x} addr:{3:#06x}'.format(
typeval, sel, flags, d)
def __jam__(self, inst, address):
"""Extract a JAM instruction."""
lfo = inst['args'][0]|0x2
lfostr = self.chosel[lfo]
inst['comment'] = 'lfo:{0:#03x}'.format(lfo)
inst['argstring'] = lfostr
def __delayop__(self, inst, address):
"""Extract a delay/multiplier instruction: op delay,k"""
offset = inst['args'][0]
mult = inst['args'][1]
inst['comment'] = 'del:{0:#06x} k:{1:#05x}'.format(offset, mult)
inst['argstring'] = ','.join([ str(offset),
self.__s1_9__(mult) ])
def __mulx__(self, inst, address):
"""Extract a mulx instruction."""
reg = inst['args'][0]
inst['comment'] = 'reg:{0:#04x}'.format(reg)
inst['argstring'] = self.__reg__(reg)
def __rmpa__(self, inst, address):
"""Extract a rmpa instruction."""
mult = inst['args'][0]
inst['comment'] = 'k:{0:#05x}'.format(mult)
inst['argstring'] = self.__s1_9__(mult)
def __scaleoft__(self, inst, address):
"""Extract a scale/offset instruction: op k,const"""
mult = inst['args'][0]
offset = inst['args'][1]
inst['comment'] = 'k:{0:#06x} const:{1:#05x}'.format(mult,offset)
ostr = self.__s_10__(offset)
inst['argstring'] = ','.join([ self.__s1_14__(mult), ostr ])
def __bitop__(self, inst, address):
"""Extract a bitwise accumulator operation: op mask"""
mask = inst['args'][0]
if inst['mnemonic'] == 'and' and mask == 0:
inst['mnemonic'] = 'clr'
inst['comment'] = 'and 0'
elif inst['mnemonic'] == 'xor' and mask == 0xffffff:
inst['mnemonic'] = 'not'
inst['comment'] = 'xor 0xffffff'
else:
            inst['comment'] = 'val:' + self.__s_23__(mask)
inst['argstring'] = '{0:#08x}'.format(mask)
def __wldx__(self, inst, address):
"""Extract wldr and wlds instructions."""
if inst['command'] & 0x40000000:
# WLDR
ni = self.__decode__(inst['command'], override='WLDR')
inst['args'] = ni['args']
inst['mnemonic'] = 'wldr'
lfo = inst['args'][0]&0x1
freq = inst['args'][1]
amp = inst['args'][2]
ampstr = '{0:01x}'.format(amp)
if amp in self.rampamp:
ampstr = self.rampamp[amp]
inst['argstring'] = ','.join(['RMP'+str(lfo),
self.__i_15__(freq), ampstr ])
inst['comment'] = 'lfo:{0:#03x} f:{1:#06x} a:{2:#03x}'.format(
lfo, freq, amp)
else:
# WLDS
ni = self.__decode__(inst['command'], override='WLDS')
inst['args'] = ni['args']
inst['mnemonic'] = 'wlds'
lfo = inst['args'][0]&0x1
freq = inst['args'][1]
amp = inst['args'][2]
inst['argstring'] = ','.join(['SIN'+str(lfo),
str(freq), str(amp) ])
inst['comment'] = 'lfo:{0:#03x} f:{1:#05x} a:{2:#06x}'.format(
lfo, freq, amp)
def __skp__(self, inst, address):
"""Extract a skp operation."""
flags = inst['args'][0]
offset = inst['args'][1]
targetstr = '{0:d}'.format(offset)
if not self.relskip:
taddr = address+offset+1
targetstr = 'addr{0:02x}'.format(taddr)
self.jmptbl[taddr] = targetstr
inst['target'] = targetstr
inst['comment'] = 'flags:{0:#04x} offset:{1:#04x}'.format(
flags, offset)
flagv = []
if flags == 0:
flagv.append('0')
else:
for flag in self.skipflags:
if flags&flag:
flagv.append(self.skipflags[flag])
inst['argstring'] = ','.join([
'|'.join(flagv),
targetstr ])
def __raw__(self, inst, address):
"""Extract a raw data instruction."""
val = inst['args'][0]
if self.nopraw:
inst['mnemonic'] = 'nop'
else:
inst['argstring'] = '{0:#010x}'.format(val)
inst['comment'] = repr(struct.pack('>I',val))
def __fixinst__(self, inst, address):
"""Examine instruction and extract an assembly equivalent."""
if inst['mnemonic'] == 'skp':
if inst['args'][0] == 0 and inst['args'][1] == 0:
inst['mnemonic'] = 'nop'
inst['comment'] = 'skp 0,0'
else:
self.__skp__(inst, address)
elif inst['mnemonic'] in ['rdax', 'wrax', 'maxx',
'rdfx', 'wrlx', 'wrhx',]:
self.__regmult__(inst, address)
elif inst['mnemonic'] in ['mulx',]:
self.__mulx__(inst, address)
elif inst['mnemonic'] in ['rda', 'wra', 'wrap',]:
self.__delayop__(inst, address)
elif inst['mnemonic'] in ['log', 'exp', 'sof']:
self.__scaleoft__(inst, address)
elif inst['mnemonic'] in ['rmpa',]:
self.__rmpa__(inst, address)
elif inst['mnemonic'] in ['jam',]:
self.__jam__(inst, address)
elif inst['mnemonic'] in ['cho',]:
self.__cho__(inst, address)
elif inst['mnemonic'] in ['wldx',]:
self.__wldx__(inst, address)
elif inst['mnemonic'] in ['and', 'or', 'xor',]:
self.__bitop__(inst, address)
elif inst['mnemonic'] == 'raw':
self.__raw__(inst, address)
else:
self.dowarn('info: Unknown mnemonic: '
+ repr(inst['mnemonic'])
+ ' raw:{0:#010x} at address:{1:#04x}'.format(
inst['command'], address))
if address in self.jmptbl:
inst['label'] = self.jmptbl[address]
def __decode__(self, command, override=None):
"""Decode raw command into opcode and arguments."""
opcode = command&M5
ret = {'opcode':opcode,
'mnemonic':None,
'args':[],
'command':command,
'label':None,
'comment':None,
'argstring':None,
'target':None,
}
if override is not None:
opcode = override
if opcode in op_tbl:
inst = op_tbl[opcode]
ret['mnemonic'] = inst[0].lower()
for arg in inst[1:]:
ret['args'].append((command>>arg[1])&arg[0])
else:
ret['mnemonic'] = 'raw'
ret['args'].append(command)
return ret
def deparse(self):
"""Disassemble input."""
plen = len(self.source)
oft = 0
while oft+3 < plen:
rawinst = struct.unpack_from('>I', self.source, oft)[0]
self.program.append(self.__decode__(rawinst))
oft += 4
cnt = 0
for i in self.program:
self.__fixinst__(i, cnt)
cnt += 1
cnt = len(self.program)-1
while cnt > 0:
if self.program[cnt]['mnemonic'] in ['nop', 'skp']:
del(self.program[cnt])
else:
break
cnt -= 1
for l in self.program:
label = ''
if l['label'] is not None:
label = l['label']+':'
mnemonic = l['mnemonic']
argstring = ''
if l['argstring'] is not None:
argstring = l['argstring']
comment = ''
if l['comment'] is not None:
comment = '; ' + l['comment']
self.listing += '\t'.join([
label, mnemonic, argstring.ljust(23), comment
]) + '\n'
for j in sorted(self.jmptbl):
if j >= len(self.program):
self.listing += self.jmptbl[j] + ':\n'
self.dowarn('info: Read {} instructions.'.format(len(self.program)))
def main():
parser = argparse.ArgumentParser(
description='Disassemble a single FV-1 DSP program.')
parser.add_argument('infile',
type=argparse.FileType('rb'),
help='binary program file',
default=sys.stdin)
parser.add_argument('outfile',
nargs='?',
help='assembly program output file',
default=sys.stdout)
parser.add_argument('-v', '--version',
action='version',
help='print version',
version='%(prog)s ' + VERSION)
parser.add_argument('-q', '--quiet',
action='store_true',
help='suppress warnings')
parser.add_argument('-r', '--relative',
action='store_true',
help='use relative skip targets')
parser.add_argument('-s', '--suppressraw',
action='store_true',
help="convert invalid/raw statements into nop")
parser.add_argument('-p',
help='program number',
type=int, choices=list(range(0,8)))
args = parser.parse_args()
dowarn = warning
if args.quiet:
dowarn = quiet
dowarn('FV-1 Disassembler v' + VERSION)
dowarn('info: Reading input from ' + args.infile.name)
inbuf = args.infile.read(8*4*PROGLEN)
oft = 0
if args.p is not None:
oft = args.p * (PROGLEN*4)
dowarn('info: Reading from program {0} at offset {1:#06x}'.format(
args.p, oft))
fp = fv1deparse(inbuf[oft:oft+(PROGLEN*4)],
relative=args.relative, nopraw=args.suppressraw,
wfunc=dowarn)
fp.deparse()
ofile = None
if args.outfile is sys.stdout:
ofile = args.outfile
else:
try:
ofile = open(args.outfile, 'w')
except Exception as e:
error('error: writing output: ' + str(e))
sys.exit(-1)
ofile.write(fp.listing)
ofile.close()
if __name__ == '__main__':
main()
| 33.070209 | 86 | 0.493115 |
4aeb507dcab73eaa19fed6706746abc134ecc9ff
| 610 |
gyp
|
Python
|
v8_4_5/build/all.gyp
|
wenfeifei/miniblink49
|
2ed562ff70130485148d94b0e5f4c343da0c2ba4
|
[
"Apache-2.0"
] | 5,964 |
2016-09-27T03:46:29.000Z
|
2022-03-31T16:25:27.000Z
|
v8_4_5/build/all.gyp
|
w4454962/miniblink49
|
b294b6eacb3333659bf7b94d670d96edeeba14c0
|
[
"Apache-2.0"
] | 479 |
2016-02-10T00:21:41.000Z
|
2020-11-26T09:40:03.000Z
|
v8_4_5/build/all.gyp
|
w4454962/miniblink49
|
b294b6eacb3333659bf7b94d670d96edeeba14c0
|
[
"Apache-2.0"
] | 1,006 |
2016-09-27T05:17:27.000Z
|
2022-03-30T02:46:51.000Z
|
# Copyright 2011 the V8 project authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'All',
'type': 'none',
'dependencies': [
'../samples/samples.gyp:*',
'../src/d8.gyp:d8',
'../test/cctest/cctest.gyp:*',
'../test/unittests/unittests.gyp:*',
],
'conditions': [
['component!="shared_library"', {
'dependencies': [
'../tools/parser-shell.gyp:parser-shell',
],
}],
]
}
]
}
| 23.461538 | 72 | 0.513115 |
e6810bfd1f4e481c1f92b1f7b34392a590d1852b
| 412 |
py
|
Python
|
regular_expression.py
|
keerthana1502/python_practice
|
8c0499e014826af78f9a88730551ace3fa79686d
|
[
"bzip2-1.0.6"
] | null | null | null |
regular_expression.py
|
keerthana1502/python_practice
|
8c0499e014826af78f9a88730551ace3fa79686d
|
[
"bzip2-1.0.6"
] | null | null | null |
regular_expression.py
|
keerthana1502/python_practice
|
8c0499e014826af78f9a88730551ace3fa79686d
|
[
"bzip2-1.0.6"
] | null | null | null |
import re
a="hello world"
# "he..o": 'h', two arbitrary characters, then 'o'
x=re.findall("he..o",a)
print(x)
# "[a-z]": every lowercase letter, returned one character at a time
y=re.findall("[a-z]",a)
print(y)
# "^h": 'h' only at the start of the string
z=re.findall("^h",a)
print(z)
# "d$": 'd' only at the end of the string
b=re.findall("d$",a)
print(b)
# "h.*o": 'h', zero or more characters, then 'o' (greedy)
c=re.findall("h.*o",a)
print(c)
# "h.+o": 'h', one or more characters, then 'o'
d=re.findall("h.+o",a)
print(d)
# "h.?": 'h' followed by at most one character
e=re.findall("h.?",a)
print(e)
# "h.{3}o": 'h', exactly three characters, then 'o'
f=re.findall("h.{3}o",a)
print(f)
# "hello/world": '/' is a literal character, so this never matches "hello world"
g=re.findall("hello/world",a)
print(g)
# r"\Ah": 'h' at the very beginning of the string (raw string avoids escape warnings)
h=re.findall(r"\Ah",a)
print(h)
# r"\Bd": 'd' whose preceding position is not a word boundary
i=re.findall(r"\Bd",a)
print(i)
# r"\Bl": 'l' whose preceding position is not a word boundary
j=re.findall(r"\Bl",a)
print(j)
| 15.846154 | 29 | 0.616505 |
b27838f3e4db1a0f021ac12e661a8d9c98f15091
| 621 |
py
|
Python
|
setup.py
|
demonCoder95/Gerrit-to-Github-Issues
|
268be3f5a2865c67caa9778b80f242f15792e55c
|
[
"Apache-2.0"
] | 2 |
2020-02-26T21:00:44.000Z
|
2020-04-17T20:16:57.000Z
|
setup.py
|
demonCoder95/Gerrit-to-Github-Issues
|
268be3f5a2865c67caa9778b80f242f15792e55c
|
[
"Apache-2.0"
] | 7 |
2020-05-04T19:31:16.000Z
|
2021-03-24T19:09:50.000Z
|
setup.py
|
airshipit/gerrit-to-github-bot
|
3c62ac1eefe07343c7525733b4096307d1a3ebcd
|
[
"Apache-2.0"
] | 2 |
2020-04-18T13:29:06.000Z
|
2020-04-30T00:04:19.000Z
|
# Licensed under the Apache License, Version 2.0 (the 'License');
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
setuptools.setup(setup_requires=['pbr>=2.0.0'], pbr=True)
| 41.4 | 74 | 0.763285 |
6d254bf4731044195ff76da6dcb415af1902a773
| 13,524 |
py
|
Python
|
tests/test_data/test_datasets/test_s3dis_dataset.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 2,216 |
2020-07-09T19:10:11.000Z
|
2022-03-31T12:39:26.000Z
|
tests/test_data/test_datasets/test_s3dis_dataset.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 1,174 |
2020-07-10T07:02:28.000Z
|
2022-03-31T12:38:56.000Z
|
tests/test_data/test_datasets/test_s3dis_dataset.py
|
Guangyun-Xu/mmdetection3d
|
75c5c6cd590386bd1539a686c5fd2cc45c5480d5
|
[
"Apache-2.0"
] | 681 |
2020-07-09T19:40:06.000Z
|
2022-03-31T11:02:24.000Z
|
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet3d.datasets import S3DISDataset, S3DISSegDataset
def test_getitem():
np.random.seed(0)
root_path = './tests/data/s3dis/'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
class_names = ('table', 'chair', 'sofa', 'bookcase', 'board')
pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(type='LoadAnnotations3D', with_bbox_3d=True, with_label_3d=True),
dict(type='PointSample', num_points=40000),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(
type='Collect3D', keys=['points', 'gt_bboxes_3d', 'gt_labels_3d'])
]
s3dis_dataset = S3DISDataset(
data_root=root_path, ann_file=ann_file, pipeline=pipeline)
data = s3dis_dataset[0]
points = data['points']._data
gt_bboxes_3d = data['gt_bboxes_3d']._data
gt_labels_3d = data['gt_labels_3d']._data
expected_gt_bboxes_3d = torch.tensor(
[[2.3080, 2.4175, 0.2010, 0.8820, 0.8690, 0.6970, 0.0000],
[2.4730, 0.7090, 0.2010, 0.9080, 0.9620, 0.7030, 0.0000],
[5.3235, 0.4910, 0.0740, 0.8410, 0.9020, 0.8790, 0.0000]])
expected_gt_labels = np.array([1, 1, 3, 1, 2, 0, 0, 0, 3])
assert tuple(points.shape) == (40000, 6)
assert torch.allclose(gt_bboxes_3d[:3].tensor, expected_gt_bboxes_3d, 1e-2)
assert np.all(gt_labels_3d.numpy() == expected_gt_labels)
def test_evaluate():
if not torch.cuda.is_available():
pytest.skip()
from mmdet3d.core.bbox.structures import DepthInstance3DBoxes
root_path = './tests/data/s3dis'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
s3dis_dataset = S3DISDataset(root_path, ann_file)
results = []
pred_boxes = dict()
pred_boxes['boxes_3d'] = DepthInstance3DBoxes(
torch.tensor([[2.3080, 2.4175, 0.2010, 0.8820, 0.8690, 0.6970, 0.0000],
[2.4730, 0.7090, 0.2010, 0.9080, 0.9620, 0.7030, 0.0000],
[5.3235, 0.4910, 0.0740, 0.8410, 0.9020, 0.8790,
0.0000]]))
pred_boxes['labels_3d'] = torch.tensor([1, 1, 3])
pred_boxes['scores_3d'] = torch.tensor([0.5, 1.0, 1.0])
results.append(pred_boxes)
ret_dict = s3dis_dataset.evaluate(results)
assert abs(ret_dict['chair_AP_0.25'] - 0.666) < 0.01
assert abs(ret_dict['chair_AP_0.50'] - 0.666) < 0.01
assert abs(ret_dict['bookcase_AP_0.25'] - 0.5) < 0.01
assert abs(ret_dict['bookcase_AP_0.50'] - 0.5) < 0.01
def test_seg_getitem():
np.random.seed(0)
root_path = './tests/data/s3dis/'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window',
'door', 'table', 'chair', 'sofa', 'bookcase', 'board',
'clutter')
palette = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
[255, 0, 255], [100, 100, 255], [200, 200, 100],
[170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
[200, 200, 200], [50, 50, 50]]
scene_idxs = [0 for _ in range(20)]
pipelines = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=tuple(range(len(class_names))),
max_cat_id=13),
dict(
type='IndoorPatchPointSample',
num_points=5,
block_size=1.0,
ignore_index=len(class_names),
use_normalized_coord=True,
enlarge_size=0.2,
min_unique_num=None),
dict(type='NormalizePointsColor', color_mean=None),
dict(type='DefaultFormatBundle3D', class_names=class_names),
dict(
type='Collect3D',
keys=['points', 'pts_semantic_mask'],
meta_keys=['file_name', 'sample_idx'])
]
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=pipelines,
classes=None,
palette=None,
modality=None,
test_mode=False,
ignore_index=None,
scene_idxs=scene_idxs)
data = s3dis_dataset[0]
points = data['points']._data
pts_semantic_mask = data['pts_semantic_mask']._data
file_name = data['img_metas']._data['file_name']
sample_idx = data['img_metas']._data['sample_idx']
assert file_name == './tests/data/s3dis/points/Area_1_office_2.bin'
assert sample_idx == 'Area_1_office_2'
expected_points = torch.tensor([[
0.0000, 0.0000, 3.1720, 0.4706, 0.4431, 0.3725, 0.4624, 0.7502, 0.9543
], [
0.2880, -0.5900, 0.0650, 0.3451, 0.3373, 0.3490, 0.5119, 0.5518, 0.0196
], [
0.1570, 0.6000, 3.1700, 0.4941, 0.4667, 0.3569, 0.4893, 0.9519, 0.9537
], [
-0.1320, 0.3950, 0.2720, 0.3216, 0.2863, 0.2275, 0.4397, 0.8830, 0.0818
],
[
-0.4860, -0.0640, 3.1710, 0.3843,
0.3725, 0.3059, 0.3789, 0.7286, 0.9540
]])
expected_pts_semantic_mask = np.array([0, 1, 0, 8, 0])
original_classes = s3dis_dataset.CLASSES
original_palette = s3dis_dataset.PALETTE
assert s3dis_dataset.CLASSES == class_names
assert s3dis_dataset.ignore_index == 13
assert torch.allclose(points, expected_points, 1e-2)
assert np.all(pts_semantic_mask.numpy() == expected_pts_semantic_mask)
assert original_classes == class_names
assert original_palette == palette
assert s3dis_dataset.scene_idxs.dtype == np.int32
assert np.all(s3dis_dataset.scene_idxs == np.array(scene_idxs))
# test dataset with selected classes
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=None,
classes=['beam', 'window'],
scene_idxs=scene_idxs)
label_map = {i: 13 for i in range(14)}
label_map.update({3: 0, 5: 1})
assert s3dis_dataset.CLASSES != original_classes
assert s3dis_dataset.CLASSES == ['beam', 'window']
assert s3dis_dataset.PALETTE == [palette[3], palette[5]]
assert s3dis_dataset.VALID_CLASS_IDS == [3, 5]
assert s3dis_dataset.label_map == label_map
assert s3dis_dataset.label2cat == {0: 'beam', 1: 'window'}
# test load classes from file
import tempfile
tmp_file = tempfile.NamedTemporaryFile()
with open(tmp_file.name, 'w') as f:
f.write('beam\nwindow\n')
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=None,
classes=tmp_file.name,
scene_idxs=scene_idxs)
assert s3dis_dataset.CLASSES != original_classes
assert s3dis_dataset.CLASSES == ['beam', 'window']
assert s3dis_dataset.PALETTE == [palette[3], palette[5]]
assert s3dis_dataset.VALID_CLASS_IDS == [3, 5]
assert s3dis_dataset.label_map == label_map
assert s3dis_dataset.label2cat == {0: 'beam', 1: 'window'}
# test scene_idxs in dataset
# we should input scene_idxs in train mode
with pytest.raises(NotImplementedError):
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=None,
scene_idxs=None)
# test mode
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=ann_file,
pipeline=None,
test_mode=True,
scene_idxs=scene_idxs)
assert np.all(s3dis_dataset.scene_idxs == np.array([0]))
def test_seg_evaluate():
if not torch.cuda.is_available():
pytest.skip()
root_path = './tests/data/s3dis'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
s3dis_dataset = S3DISSegDataset(
data_root=root_path, ann_files=ann_file, test_mode=True)
results = []
pred_sem_mask = dict(
semantic_mask=torch.tensor([
2, 3, 1, 2, 2, 6, 1, 0, 1, 1, 9, 12, 3, 0, 2, 0, 2, 0, 8, 3, 1, 2,
0, 2, 1, 7, 2, 10, 2, 0, 0, 0, 2, 3, 2, 2, 2, 2, 2, 3, 0, 0, 4, 6,
7, 2, 1, 2, 0, 1, 7, 0, 2, 2, 2, 0, 2, 2, 1, 12, 0, 2, 2, 2, 2, 7,
2, 2, 0, 2, 6, 2, 12, 6, 3, 12, 2, 1, 6, 1, 2, 6, 8, 2, 10, 1, 11,
0, 6, 9, 4, 3, 0, 0, 12, 1, 1, 5, 3, 2
]).long())
results.append(pred_sem_mask)
ret_dict = s3dis_dataset.evaluate(results)
assert abs(ret_dict['miou'] - 0.7625) < 0.01
assert abs(ret_dict['acc'] - 0.9) < 0.01
assert abs(ret_dict['acc_cls'] - 0.9074) < 0.01
def test_seg_show():
import mmcv
import tempfile
from os import path as osp
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
root_path = './tests/data/s3dis'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
s3dis_dataset = S3DISSegDataset(
data_root=root_path, ann_files=ann_file, scene_idxs=[0])
result = dict(
semantic_mask=torch.tensor([
2, 2, 1, 2, 2, 5, 1, 0, 1, 1, 9, 12, 3, 0, 2, 0, 2, 0, 8, 2, 0, 2,
0, 2, 1, 7, 2, 10, 2, 0, 0, 0, 2, 2, 2, 2, 2, 1, 2, 2, 0, 0, 4, 6,
7, 2, 1, 2, 0, 1, 7, 0, 2, 2, 2, 0, 2, 2, 1, 12, 0, 2, 2, 2, 2, 7,
2, 2, 0, 2, 6, 2, 12, 6, 2, 12, 2, 1, 6, 1, 2, 6, 8, 2, 10, 1, 10,
0, 6, 9, 4, 3, 0, 0, 12, 1, 1, 5, 2, 2
]).long())
results = [result]
s3dis_dataset.show(results, temp_dir, show=False)
pts_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_points.obj')
gt_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_gt.obj')
pred_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_pred.obj')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
tmp_dir.cleanup()
# test show with pipeline
tmp_dir = tempfile.TemporaryDirectory()
temp_dir = tmp_dir.name
class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window',
'door', 'table', 'chair', 'sofa', 'bookcase', 'board',
'clutter')
eval_pipeline = [
dict(
type='LoadPointsFromFile',
coord_type='DEPTH',
shift_height=False,
use_color=True,
load_dim=6,
use_dim=[0, 1, 2, 3, 4, 5]),
dict(
type='LoadAnnotations3D',
with_bbox_3d=False,
with_label_3d=False,
with_mask_3d=False,
with_seg_3d=True),
dict(
type='PointSegClassMapping',
valid_cat_ids=tuple(range(len(class_names))),
max_cat_id=13),
dict(
type='DefaultFormatBundle3D',
with_label=False,
class_names=class_names),
dict(type='Collect3D', keys=['points', 'pts_semantic_mask'])
]
s3dis_dataset.show(results, temp_dir, show=False, pipeline=eval_pipeline)
pts_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_points.obj')
gt_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_gt.obj')
pred_file_path = osp.join(temp_dir, 'Area_1_office_2',
'Area_1_office_2_pred.obj')
mmcv.check_file_exist(pts_file_path)
mmcv.check_file_exist(gt_file_path)
mmcv.check_file_exist(pred_file_path)
tmp_dir.cleanup()
def test_multi_areas():
# S3DIS dataset has 6 areas, we often train on several of them
# need to verify the concat function of S3DISSegDataset
root_path = './tests/data/s3dis'
ann_file = './tests/data/s3dis/s3dis_infos.pkl'
class_names = ('ceiling', 'floor', 'wall', 'beam', 'column', 'window',
'door', 'table', 'chair', 'sofa', 'bookcase', 'board',
'clutter')
palette = [[0, 255, 0], [0, 0, 255], [0, 255, 255], [255, 255, 0],
[255, 0, 255], [100, 100, 255], [200, 200, 100],
[170, 120, 200], [255, 0, 0], [200, 100, 100], [10, 200, 100],
[200, 200, 200], [50, 50, 50]]
scene_idxs = [0 for _ in range(20)]
# repeat
repeat_num = 3
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=[ann_file for _ in range(repeat_num)],
scene_idxs=scene_idxs)
assert s3dis_dataset.CLASSES == class_names
assert s3dis_dataset.PALETTE == palette
assert len(s3dis_dataset.data_infos) == repeat_num
assert np.all(s3dis_dataset.scene_idxs == np.concatenate(
[np.array(scene_idxs) + i for i in range(repeat_num)]))
# different scene_idxs input
s3dis_dataset = S3DISSegDataset(
data_root=root_path,
ann_files=[ann_file for _ in range(repeat_num)],
scene_idxs=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 3, 4], [0, 1, 1, 2, 2, 2]])
assert np.all(s3dis_dataset.scene_idxs == np.array(
[0, 0, 1, 2, 2, 3, 4, 5, 6, 6, 7, 8, 9, 9, 10, 10, 10]))
| 38.862069 | 79 | 0.58533 |
e109d7bbbe3073feedfbac6428381a98f90aad94
| 1,742 |
py
|
Python
|
app/user/serializers.py
|
tejasvadgama5/recipe-app-api
|
670ec021fb75f99f490079baa105b7c4e58050ab
|
[
"MIT"
] | null | null | null |
app/user/serializers.py
|
tejasvadgama5/recipe-app-api
|
670ec021fb75f99f490079baa105b7c4e58050ab
|
[
"MIT"
] | null | null | null |
app/user/serializers.py
|
tejasvadgama5/recipe-app-api
|
670ec021fb75f99f490079baa105b7c4e58050ab
|
[
"MIT"
] | 1 |
2021-11-12T12:39:36.000Z
|
2021-11-12T12:39:36.000Z
|
from django.contrib.auth import get_user_model, authenticate
from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
class UserSerializer(serializers.ModelSerializer):
"""Serializer for users objects"""
class Meta:
model = get_user_model()
fields = ('email', 'password', 'name')
extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}
def create(self, validated_data):
"""Create a new user with encrypted password and return it"""
return get_user_model().objects.create_user(**validated_data)
def update(self, instance, validated_data):
"""Update a new user, setting the password correctly and return it"""
password = validated_data.pop('password', None)
user = super().update(instance, validated_data=validated_data)
if password:
user.set_password(password)
user.save()
return user
class AuthTokenSerializer(serializers.Serializer):
"""Serializer for user authentication objects"""
email = serializers.CharField()
password = serializers.CharField(
style={'input_type': 'password'},
trim_whitespace=False
)
def validate(self, attrs):
"""Validate and authenticate the user"""
email = attrs.get('email')
password = attrs.get('password')
user = authenticate(
request=self.context.get('request'),
username=email,
password=password
)
if not user:
msg = _('Unable to authenticate with provided credentials')
raise serializers.ValidationError(msg, code='authentication')
attrs['user'] = user
return attrs
| 31.672727 | 78 | 0.64868 |
099d274b7f1d96166eca8ad57af371f0e3951dcc
| 3,168 |
py
|
Python
|
tests/misc/test_SetValueTransformer.py
|
bissoligiulia/tubular
|
878fa0d484ab1e8688e40680f51b2dcaa15abe2e
|
[
"BSD-3-Clause"
] | 32 |
2021-04-26T13:04:26.000Z
|
2022-03-18T16:22:13.000Z
|
tests/misc/test_SetValueTransformer.py
|
bissoligiulia/tubular
|
878fa0d484ab1e8688e40680f51b2dcaa15abe2e
|
[
"BSD-3-Clause"
] | 15 |
2021-05-08T09:46:48.000Z
|
2021-11-23T11:40:15.000Z
|
tests/misc/test_SetValueTransformer.py
|
bissoligiulia/tubular
|
878fa0d484ab1e8688e40680f51b2dcaa15abe2e
|
[
"BSD-3-Clause"
] | 6 |
2021-05-05T08:48:00.000Z
|
2021-08-17T12:31:32.000Z
|
import pytest
import test_aide as ta
import tests.test_data as d
import tubular
from tubular.misc import SetValueTransformer
class TestInit:
"""Tests for the SetValueTransformer.__init__ method."""
def test_arguments(self):
"""Test that init has expected arguments."""
ta.functions.test_function_arguments(
func=SetValueTransformer.__init__,
expected_arguments=["self", "columns", "value"],
expected_default_values=None,
)
def test_inheritance(self):
"""Test SetValueTransformer inherits from BaseTransformer."""
x = SetValueTransformer(columns=["a"], value=1)
assert isinstance(
x, tubular.base.BaseTransformer
), "SetValueTransformer is not instance of tubular.base.BaseTransformer"
def test_super_init_call(self, mocker):
"""Test that BaseTransformer.init us called as expected."""
expected_call_args = {
0: {
"args": (),
"kwargs": {"columns": ["a", "b"], "verbose": False, "copy": False},
}
}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "__init__", expected_call_args
):
SetValueTransformer(columns=["a", "b"], value=1, verbose=False, copy=False)
def test_value_attribute_set(self):
"""Test that the value passed in the value arg is set as an attribute of the same name."""
x = SetValueTransformer(columns=["a", "b"], value=1)
assert x.value == 1, "unexpected value set to value atttribute"
class TestTransform:
"""Tests for the SetValueTransformer.transform method."""
def expected_df_1():
"""Expected output of test_value_set_in_transform."""
df = d.create_df_2()
df["a"] = "a"
df["b"] = "a"
return df
def test_arguments(self):
"""Test that transform has expected arguments."""
ta.functions.test_function_arguments(
func=SetValueTransformer.transform,
expected_arguments=["self", "X"],
expected_default_values=None,
)
def test_super_transform_called(self, mocker):
"""Test that BaseTransformer.transform called."""
df = d.create_df_7()
x = SetValueTransformer(columns=["a", "b"], value=1)
expected_call_args = {0: {"args": (d.create_df_7(),), "kwargs": {}}}
with ta.functions.assert_function_call(
mocker, tubular.base.BaseTransformer, "transform", expected_call_args
):
x.transform(df)
@pytest.mark.parametrize(
"df, expected",
ta.pandas.adjusted_dataframe_params(d.create_df_2(), expected_df_1()),
)
def test_value_set_in_transform(self, df, expected):
"""Test that transform sets the value as expected."""
x = SetValueTransformer(columns=["a", "b"], value="a")
df_transformed = x.transform(df)
ta.equality.assert_equal_dispatch(
actual=df_transformed,
expected=expected,
msg="incorrect value after SetValueTransformer transform",
)
| 29.607477 | 98 | 0.621212 |
56cd75421b190a5055e27abb7a8c56321a12ad87
| 576 |
py
|
Python
|
elections/migrations/0053_presidentcandidate_is_active.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | 2 |
2018-11-16T21:45:17.000Z
|
2019-02-03T19:55:46.000Z
|
elections/migrations/0053_presidentcandidate_is_active.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | 13 |
2018-08-17T19:12:11.000Z
|
2022-03-11T23:27:41.000Z
|
elections/migrations/0053_presidentcandidate_is_active.py
|
zinaukarenku/zkr-platform
|
8daf7d1206c482f1f8e0bcd54d4fde783e568774
|
[
"Apache-2.0"
] | null | null | null |
# Generated by Django 2.1.7 on 2019-05-21 00:21
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('elections', '0052_europarliamentcandidateconviction_country'),
]
operations = [
migrations.AddField(
model_name='presidentcandidate',
name='is_active',
field=models.BooleanField(db_index=True, default=True, help_text='Indikuoja ar kandidatas į prezidentus matomas prezidenų sąraše bei galima užduoti naują klausimą.', verbose_name='Aktyvus'),
),
]
| 30.315789 | 202 | 0.685764 |
b4d50258a9f6148346a7ebfb5135e91cdad12ede
| 9,125 |
py
|
Python
|
misc/doc/sources/conf.py
|
pyghassen/jasmin
|
d6bf0b40bb72e406bcb0dd3a56064a28efd7c6b3
|
[
"Apache-2.0"
] | null | null | null |
misc/doc/sources/conf.py
|
pyghassen/jasmin
|
d6bf0b40bb72e406bcb0dd3a56064a28efd7c6b3
|
[
"Apache-2.0"
] | null | null | null |
misc/doc/sources/conf.py
|
pyghassen/jasmin
|
d6bf0b40bb72e406bcb0dd3a56064a28efd7c6b3
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath', 'sphinx.ext.mathjax', 'sphinx.ext.ifconfig', 'sphinx.ext.viewcode']
todo_include_todos=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Jasmin SMS Gateway'
copyright = u'2015, Jasmin'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.6"
# The full version, including alpha/beta/rc tags.
release = "0.6.0-beta"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {'collapsiblesidebar': True}
html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = 'Jasmin SMS Gateway'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = 'Jasmin SMS Gateway documentation'
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/jasmin-small.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'jasmindoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'jasmin.tex', u'Jasmin Documentation',
u'Fourat ZOUARI', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'jasmin', u'Jasmin Documentation',
[u'Fourat ZOUARI'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'jasmin', u'Jasmin Documentation',
u'Fourat ZOUARI', 'jasmin', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# -- Options for Epub output ---------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = u'jasmin sms gateway'
epub_author = u'Fourat ZOUARI'
epub_publisher = u'Fourat ZOUARI'
epub_copyright = u'2015, Fourat ZOUARI'
# The language of the text. It defaults to the language option
# or en if the language is not set.
#epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
#epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#epub_identifier = ''
# A unique identification for the text.
#epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
#epub_cover = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_pre_files = []
# HTML files that should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
#epub_post_files = []
# A list of files that should not be packed into the epub file.
#epub_exclude_files = []
# The depth of the table of contents in toc.ncx.
#epub_tocdepth = 3
# Allow duplicate toc entries.
#epub_tocdup = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
# Jasmin vars
author = u'Fourat ZOUARI'
| 32.130282 | 215 | 0.71463 |
7164a761484071cad13378d533dabd21bf1ef622
| 11,248 |
py
|
Python
|
spark_fhir_schemas/r4/complex_types/chargeitemdefinition_propertygroup.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | 2 |
2020-10-31T23:25:01.000Z
|
2021-06-09T14:12:42.000Z
|
spark_fhir_schemas/r4/complex_types/chargeitemdefinition_propertygroup.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | null | null | null |
spark_fhir_schemas/r4/complex_types/chargeitemdefinition_propertygroup.py
|
imranq2/SparkFhirSchemas
|
24debae6980fb520fe55aa199bdfd43c0092eb9c
|
[
"Apache-2.0"
] | null | null | null |
from typing import Union, List, Optional
from pyspark.sql.types import StructType, StructField, StringType, ArrayType, DataType
# This file is auto-generated by generate_schema so do not edit it manually
# noinspection PyPep8Naming
class ChargeItemDefinition_PropertyGroupSchema:
"""
The ChargeItemDefinition resource provides the properties that apply to the
(billing) codes necessary to calculate costs and prices. The properties may
differ largely depending on type and realm, therefore this resource gives only
a rough structure and requires profiling for each type of billing code system.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueUrl",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
include_modifierExtension: Optional[bool] = False,
) -> Union[StructType, DataType]:
"""
The ChargeItemDefinition resource provides the properties that apply to the
(billing) codes necessary to calculate costs and prices. The properties may
differ largely depending on type and realm, therefore this resource gives only
a rough structure and requires profiling for each type of billing code system.
id: Unique id for the element within a resource (for internal references). This
may be any string value that does not contain spaces.
extension: May be used to represent additional information that is not part of the basic
definition of the element. To make the use of extensions safe and manageable,
there is a strict set of governance applied to the definition and use of
extensions. Though any implementer can define an extension, there is a set of
requirements that SHALL be met as part of the definition of the extension.
modifierExtension: May be used to represent additional information that is not part of the basic
definition of the element and that modifies the understanding of the element
in which it is contained and/or the understanding of the containing element's
descendants. Usually modifier elements provide negation or qualification. To
make the use of extensions safe and manageable, there is a strict set of
governance applied to the definition and use of extensions. Though any
implementer can define an extension, there is a set of requirements that SHALL
be met as part of the definition of the extension. Applications processing a
resource are required to check for modifier extensions.
Modifier extensions SHALL NOT change the meaning of any elements on Resource
or DomainResource (including cannot change the meaning of modifierExtension
itself).
applicability: Expressions that describe applicability criteria for the priceComponent.
priceComponent: The price for a ChargeItem may be calculated as a base price with
surcharges/deductions that apply in certain conditions. A ChargeItemDefinition
resource that defines the prices, factors and conditions that apply to a
billing code is currently under development. The priceComponent element can be
used to offer transparency to the recipient of the Invoice of how the prices
have been calculated.
"""
from spark_fhir_schemas.r4.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.r4.complex_types.chargeitemdefinition_applicability import (
ChargeItemDefinition_ApplicabilitySchema,
)
from spark_fhir_schemas.r4.complex_types.chargeitemdefinition_pricecomponent import (
ChargeItemDefinition_PriceComponentSchema,
)
if (
max_recursion_limit
and nesting_list.count("ChargeItemDefinition_PropertyGroup")
>= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + [
"ChargeItemDefinition_PropertyGroup"
]
schema = StructType(
[
# Unique id for the element within a resource (for internal references). This
# may be any string value that does not contain spaces.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the element. To make the use of extensions safe and manageable,
# there is a strict set of governance applied to the definition and use of
# extensions. Though any implementer can define an extension, there is a set of
# requirements that SHALL be met as part of the definition of the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# May be used to represent additional information that is not part of the basic
# definition of the element and that modifies the understanding of the element
# in which it is contained and/or the understanding of the containing element's
# descendants. Usually modifier elements provide negation or qualification. To
# make the use of extensions safe and manageable, there is a strict set of
# governance applied to the definition and use of extensions. Though any
# implementer can define an extension, there is a set of requirements that SHALL
# be met as part of the definition of the extension. Applications processing a
# resource are required to check for modifier extensions.
#
# Modifier extensions SHALL NOT change the meaning of any elements on Resource
# or DomainResource (including cannot change the meaning of modifierExtension
# itself).
StructField(
"modifierExtension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# Expressions that describe applicability criteria for the priceComponent.
StructField(
"applicability",
ArrayType(
ChargeItemDefinition_ApplicabilitySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
# The price for a ChargeItem may be calculated as a base price with
# surcharges/deductions that apply in certain conditions. A ChargeItemDefinition
# resource that defines the prices, factors and conditions that apply to a
# billing code is currently under development. The priceComponent element can be
# used to offer transparency to the recipient of the Invoice of how the prices
# have been calculated.
StructField(
"priceComponent",
ArrayType(
ChargeItemDefinition_PriceComponentSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
include_modifierExtension=include_modifierExtension,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
if not include_modifierExtension:
schema.fields = [
c
if c.name != "modifierExtension"
else StructField("modifierExtension", StringType(), True)
for c in schema.fields
]
return schema
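# Illustrative usage sketch (not part of the auto-generated schema): it shows how the
# StructType returned by get_schema() could be applied when reading ChargeItemDefinition
# propertyGroup JSON with PySpark. The SparkSession setup and the input path
# "property_group.json" are assumptions made for this example only.
if __name__ == "__main__":
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.appName("propertygroup-schema-demo").getOrCreate()
    # Keep the example schema small by limiting nesting and collapsing extensions.
    example_schema = ChargeItemDefinition_PropertyGroupSchema.get_schema(
        max_nesting_depth=3,
        include_extension=False,
    )
    df = spark.read.schema(example_schema).json("property_group.json")  # hypothetical input
    df.printSchema()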
| 51.59633 | 104 | 0.595484 |
c9d696262109491d489db9e26bc567a8c8e1475f
| 734 |
py
|
Python
|
examples/script_matplotlib.py
|
kwagstyl/matplotlib_surface_plotting
|
5949e0a221eb63f53672b9a9dd297920f4de51a0
|
[
"MIT"
] | 21 |
2020-03-23T11:56:16.000Z
|
2022-03-18T04:37:04.000Z
|
examples/script_matplotlib.py
|
kwagstyl/matplotlib_surface_plotting
|
5949e0a221eb63f53672b9a9dd297920f4de51a0
|
[
"MIT"
] | null | null | null |
examples/script_matplotlib.py
|
kwagstyl/matplotlib_surface_plotting
|
5949e0a221eb63f53672b9a9dd297920f4de51a0
|
[
"MIT"
] | 3 |
2020-03-24T16:23:55.000Z
|
2021-03-10T13:02:47.000Z
|
import os, sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from matplotlib_surface_plotting import plot_surf
import nibabel as nb
import numpy as np
vertices, faces=nb.freesurfer.io.read_geometry('../data/lh.inflated')
overlay = nb.freesurfer.io.read_morph_data('../data/lh.thickness')
#optional masking of medial wall
cortex=nb.freesurfer.io.read_label('../data/lh.cortex.label')
mask=np.ones_like(overlay).astype(bool)
mask[cortex]=0
overlay[mask]=np.min(overlay)
plot_surf( vertices, faces, overlay, rotate=[90,270], filename='demo_plot.png',
vmax = np.max(overlay[cortex]),vmin=np.min(overlay[cortex]),mask=mask,
pvals=np.ones_like(overlay), cmap_label='thickness \n(mm)')
| 36.7 | 80 | 0.750681 |
f129f490c0b6e782db1e0d0da7cabb13b617e567
| 3,041 |
py
|
Python
|
tools/wafadmin/3rdparty/go.py
|
rohankumardubey/node
|
d49d53fd499f7cf68fdfcc7d0c9d401e4e4407fb
|
[
"MIT"
] | 3 |
2015-11-08T08:52:16.000Z
|
2022-03-19T07:35:26.000Z
|
tools/wafadmin/3rdparty/go.py
|
rohankumardubey/node
|
d49d53fd499f7cf68fdfcc7d0c9d401e4e4407fb
|
[
"MIT"
] | null | null | null |
tools/wafadmin/3rdparty/go.py
|
rohankumardubey/node
|
d49d53fd499f7cf68fdfcc7d0c9d401e4e4407fb
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
# go.py - Waf tool for the Go programming language
# By: Tom Wambold <[email protected]>
import platform
import Task
import Utils
from TaskGen import feature, extension, after
Task.simple_task_type('gocompile', '${GOC} ${GOCFLAGS} -o ${TGT} ${SRC}', shell=False)
Task.simple_task_type('gopack', '${GOP} grc ${TGT} ${SRC}', shell=False)
Task.simple_task_type('golink', '${GOL} ${GOLFLAGS} -o ${TGT} ${SRC}', shell=False)
def detect(conf):
def set_def(var, val):
if not conf.env[var]:
conf.env[var] = val
set_def('GO_PLATFORM', platform.machine())
if conf.env.GO_PLATFORM == 'x86_64':
set_def('GO_COMPILER', '6g')
set_def('GO_LINKER', '6l')
set_def('GO_EXTENSION', '.6')
elif conf.env.GO_PLATFORM == 'i386':
set_def('GO_COMPILER', '8g')
set_def('GO_LINKER', '8l')
set_def('GO_EXTENSION', '.8')
if not (conf.env.GO_COMPILER or conf.env.GO_LINKER or conf.env.GO_EXTENSION):
raise conf.fatal('Unsupported platform ' + platform.machine())
set_def('GO_PACK', 'gopack')
set_def('GO_PACK_EXTENSION', '.a')
conf.find_program(conf.env.GO_COMPILER, var='GOC', mandatory=True)
conf.find_program(conf.env.GO_LINKER, var='GOL', mandatory=True)
conf.find_program(conf.env.GO_PACK, var='GOP', mandatory=True)
@extension('.go')
def compile_go(self, node):
try:
self.go_nodes.append(node)
except AttributeError:
self.go_nodes = [node]
@feature('go')
@after('apply_core')
def apply_compile_go(self):
try:
nodes = self.go_nodes
except AttributeError:
self.go_compile_task = None
else:
self.go_compile_task = self.create_task('gocompile',
nodes,
[self.path.find_or_declare(self.target + self.env.GO_EXTENSION)])
@feature('gopackage', 'goprogram')
@after('apply_compile_go')
def apply_goinc(self):
if not getattr(self, 'go_compile_task', None):
return
names = self.to_list(getattr(self, 'uselib_local', []))
for name in names:
obj = self.name_to_obj(name)
if not obj:
			raise Utils.WafError('object %r was not found in uselib_local '
					'(required by %r)' % (name, self.name))
obj.post()
self.go_compile_task.set_run_after(obj.go_package_task)
self.go_compile_task.deps_nodes.extend(obj.go_package_task.outputs)
self.env.append_unique('GOCFLAGS', '-I' + obj.path.abspath(obj.env))
self.env.append_unique('GOLFLAGS', '-L' + obj.path.abspath(obj.env))
@feature('gopackage')
@after('apply_goinc')
def apply_gopackage(self):
self.go_package_task = self.create_task('gopack',
self.go_compile_task.outputs[0],
self.path.find_or_declare(self.target + self.env.GO_PACK_EXTENSION))
self.go_package_task.set_run_after(self.go_compile_task)
self.go_package_task.deps_nodes.extend(self.go_compile_task.outputs)
@feature('goprogram')
@after('apply_goinc')
def apply_golink(self):
self.go_link_task = self.create_task('golink',
self.go_compile_task.outputs[0],
self.path.find_or_declare(self.target))
self.go_link_task.set_run_after(self.go_compile_task)
self.go_link_task.deps_nodes.extend(self.go_compile_task.outputs)
| 31.030612 | 86 | 0.730352 |
920ccf45e6b42ac11c66d6fc58806bcefda15104
| 219 |
py
|
Python
|
Chap04/04_02.py
|
elishahyousaf/linkedin-exercise-files
|
d79692fd4594d5b6f70253f78e7c4822e7659a00
|
[
"MIT"
] | null | null | null |
Chap04/04_02.py
|
elishahyousaf/linkedin-exercise-files
|
d79692fd4594d5b6f70253f78e7c4822e7659a00
|
[
"MIT"
] | null | null | null |
Chap04/04_02.py
|
elishahyousaf/linkedin-exercise-files
|
d79692fd4594d5b6f70253f78e7c4822e7659a00
|
[
"MIT"
] | 5 |
2021-01-15T04:13:50.000Z
|
2021-02-06T02:52:42.000Z
|
print("Hi!")
name = input("What's your name? ")
print("It's nice to meet you,", name)
answer = input("Are you enjoying the course? ")
if answer == "Yes":
print("That's good to hear!")
print("Final statement")
| 15.642857 | 47 | 0.630137 |
b3908c1c5d44d8061adc96e75d3b2ff298ab0b00
| 9,939 |
py
|
Python
|
auth0/v3/test/management/test_users.py
|
Sytten/auth0-python
|
59c1942acbd9723adaf587ac4bc94c9583fe38a0
|
[
"MIT"
] | null | null | null |
auth0/v3/test/management/test_users.py
|
Sytten/auth0-python
|
59c1942acbd9723adaf587ac4bc94c9583fe38a0
|
[
"MIT"
] | null | null | null |
auth0/v3/test/management/test_users.py
|
Sytten/auth0-python
|
59c1942acbd9723adaf587ac4bc94c9583fe38a0
|
[
"MIT"
] | null | null | null |
import unittest
import mock
from ...management.users import Users
class TestUsers(unittest.TestCase):
@mock.patch('auth0.v3.management.users.RestClient')
def test_list(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.list()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users', args[0])
self.assertEqual(kwargs['params'], {
'per_page': 25,
'page': 0,
'include_totals': 'true',
'sort': None,
'connection': None,
'fields': None,
'include_fields': 'true',
'q': None,
'search_engine': None
})
u.list(page=1, per_page=50, sort='s', connection='con', q='q',
search_engine='se', include_totals=False, fields=['a', 'b'],
include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users', args[0])
self.assertEqual(kwargs['params'], {
'per_page': 50,
'page': 1,
'include_totals': 'false',
'sort': 's',
'connection': 'con',
'fields': 'a,b',
'include_fields': 'false',
'q': 'q',
'search_engine': 'se'
})
@mock.patch('auth0.v3.management.users.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.create({'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/users', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v3.management.users.RestClient')
def test_delete_all_users(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.delete_all_users()
mock_instance.delete.assert_called_with(
'https://domain/api/v2/users'
)
@mock.patch('auth0.v3.management.users.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.get('an-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users/an-id', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true'})
u.get('an-id', fields=['a', 'b'], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users/an-id', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'include_fields': 'false'})
@mock.patch('auth0.v3.management.users.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.delete('an-id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/users/an-id'
)
@mock.patch('auth0.v3.management.users.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.update('an-id', {'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.patch.call_args
self.assertEqual('https://domain/api/v2/users/an-id', args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v3.management.users.RestClient')
def test_list_roles(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.list_roles('an-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users/an-id/roles', args[0])
self.assertEqual(kwargs['params'], {
'per_page': 25,
'page': 0,
'include_totals': 'true'
})
u.list_roles(id='an-id', page=1, per_page=50, include_totals=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users/an-id/roles', args[0])
self.assertEqual(kwargs['params'], {
'per_page': 50,
'page': 1,
'include_totals': 'false'
})
@mock.patch('auth0.v3.management.users.RestClient')
def test_remove_roles(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.remove_roles('an-id', ['a', 'b'])
args, kwargs = mock_instance.delete.call_args
self.assertEqual('https://domain/api/v2/users/an-id/roles',
args[0])
self.assertEqual(kwargs['data'], {'roles': ['a', 'b']})
@mock.patch('auth0.v3.management.users.RestClient')
def test_add_roles(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.add_roles('an-id', ['a', 'b'])
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/users/an-id/roles',
args[0])
self.assertEqual(kwargs['data'], {'roles': ['a', 'b']})
@mock.patch('auth0.v3.management.users.RestClient')
def test_list_permissions(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.list_permissions('an-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users/an-id/permissions', args[0])
self.assertEqual(kwargs['params'], {
'per_page': 25,
'page': 0,
'include_totals': 'true'
})
u.list_permissions(id='an-id', page=1, per_page=50, include_totals=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users/an-id/permissions', args[0])
self.assertEqual(kwargs['params'], {
'per_page': 50,
'page': 1,
'include_totals': 'false'
})
@mock.patch('auth0.v3.management.users.RestClient')
def test_remove_permissions(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.remove_permissions('an-id', ['a', 'b'])
args, kwargs = mock_instance.delete.call_args
self.assertEqual('https://domain/api/v2/users/an-id/permissions',
args[0])
self.assertEqual(kwargs['data'], {'permissions': ['a', 'b']})
@mock.patch('auth0.v3.management.users.RestClient')
def test_add_permissions(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.add_permissions('an-id', ['a', 'b'])
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/users/an-id/permissions',
args[0])
self.assertEqual(kwargs['data'], {'permissions': ['a', 'b']})
@mock.patch('auth0.v3.management.users.RestClient')
def test_delete_multifactor(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.delete_multifactor('an-id', 'provider')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/users/an-id/multifactor/provider'
)
@mock.patch('auth0.v3.management.users.RestClient')
def test_unlink_user_account(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.unlink_user_account('an-id', 'provider', 'user-id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/users/an-id/identities/provider/user-id'
)
@mock.patch('auth0.v3.management.users.RestClient')
def test_link_user_account(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.link_user_account('user-id', {'a': 'b', 'c': 'd'})
args, kwargs = mock_instance.post.call_args
self.assertEqual('https://domain/api/v2/users/user-id/identities',
args[0])
self.assertEqual(kwargs['data'], {'a': 'b', 'c': 'd'})
@mock.patch('auth0.v3.management.users.RestClient')
def test_regenerate_recovery_code(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.regenerate_recovery_code('user-id')
mock_instance.post.assert_called_with(
'https://domain/api/v2/users/user-id/recovery-code-regeneration'
)
@mock.patch('auth0.v3.management.users.RestClient')
def test_get_guardian_enrollments(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.get_guardian_enrollments('user-id')
mock_instance.get.assert_called_with(
'https://domain/api/v2/users/user-id/enrollments'
)
@mock.patch('auth0.v3.management.users.RestClient')
def test_get_log_events(self, mock_rc):
mock_instance = mock_rc.return_value
u = Users(domain='domain', token='jwttoken')
u.get_log_events('used_id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/users/used_id/logs', args[0])
self.assertEqual(kwargs['params']['page'], 0)
self.assertEqual(kwargs['params']['per_page'], 50)
self.assertIsNone(kwargs['params']['sort'])
self.assertEqual(kwargs['params']['include_totals'], 'false')
| 34.272414 | 82 | 0.596941 |
2f7139db9f37f4086a35725290d154a4e7af5353
| 1,278 |
py
|
Python
|
tests/platforms/windows/msi/test_run.py
|
junefish/python-briefcase
|
93f5c22304b3914b3c20b82e01d0a5914119faef
|
[
"BSD-3-Clause"
] | 917 |
2019-03-30T15:45:39.000Z
|
2022-03-31T05:32:02.000Z
|
tests/platforms/windows/msi/test_run.py
|
junefish/python-briefcase
|
93f5c22304b3914b3c20b82e01d0a5914119faef
|
[
"BSD-3-Clause"
] | 429 |
2019-04-07T19:03:20.000Z
|
2022-03-31T23:47:42.000Z
|
tests/platforms/windows/msi/test_run.py
|
junefish/python-briefcase
|
93f5c22304b3914b3c20b82e01d0a5914119faef
|
[
"BSD-3-Clause"
] | 166 |
2019-04-02T01:56:55.000Z
|
2022-03-28T19:10:02.000Z
|
import os
from unittest import mock
import pytest
from briefcase.exceptions import BriefcaseCommandError
from briefcase.platforms.windows.msi import WindowsMSIRunCommand
def test_run_app(first_app_config, tmp_path):
"A windows MSI can be started"
command = WindowsMSIRunCommand(base_path=tmp_path)
command.subprocess = mock.MagicMock()
command.run_app(first_app_config)
command.subprocess.run.assert_called_with(
[
os.fsdecode(tmp_path / 'windows' / 'msi' / 'First App' / 'src' / 'python' / 'pythonw.exe'),
"-m", "first_app"
],
check=True
)
def test_run_app_failed(first_app_config, tmp_path):
"If there's a problem started the app, an exception is raised"
command = WindowsMSIRunCommand(base_path=tmp_path)
command.subprocess = mock.MagicMock()
command.subprocess.run.side_effect = BriefcaseCommandError('problem')
with pytest.raises(BriefcaseCommandError):
command.run_app(first_app_config)
# The run command was still invoked, though
command.subprocess.run.assert_called_with(
[
os.fsdecode(tmp_path / 'windows' / 'msi' / 'First App' / 'src' / 'python' / 'pythonw.exe'),
"-m", "first_app"
],
check=True
)
| 29.72093 | 103 | 0.679186 |
72d91f5bd001e18848e19e82dbf9abdc17a98ba9
| 6,438 |
py
|
Python
|
pwrball info compile.py
|
mnewls/LSTM-Practice
|
4666b59a43cf0b4fd1db760413afc98fcb45ef85
|
[
"MIT"
] | null | null | null |
pwrball info compile.py
|
mnewls/LSTM-Practice
|
4666b59a43cf0b4fd1db760413afc98fcb45ef85
|
[
"MIT"
] | null | null | null |
pwrball info compile.py
|
mnewls/LSTM-Practice
|
4666b59a43cf0b4fd1db760413afc98fcb45ef85
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException, NoSuchElementException
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.action_chains import ActionChains
# open each year's results page - grab the draw date, winning numbers and jackpot - these will be different cols in the excel.
#
from openpyxl import Workbook
import html5lib
from selenium.webdriver.support.ui import Select
# to find links
from bs4 import BeautifulSoup
import urllib.request
import time # to sleep
wb = Workbook()
ws = wb.active
ws.title = "Jobs"
ws['B1'] = "Day"
ws['C1'] = "Month"
ws['D1'] = "Year"
ws['E1'] = "WB1"
ws['F1'] = "WB2"
ws['G1'] = "WB3"
ws['H1'] = "WB4"
ws['I1'] = "WB5"
ws['J1'] = "PB"
ws['K1'] = "prize"
def get_page_info(driver):
count = 2
for url_year in range(1992, 2021, 1):
url_str = r'https://www.lotto.net/powerball/numbers/' + str(url_year)
#print(url_str)
driver.get(url_str)
time.sleep(2)
#print(driver.find_element_by_xpath("//*[@id='content']/div[1]/div[1]/div[1]/text()").getText())
page_source = driver.page_source
soup = BeautifulSoup(page_source, "html5lib")
list_dates = []
is_odd = 1
list_days = []
list_year = []
list_month = []
for date in soup.findAll("div", {"class": "date"}):
date_str = date.text[14:len(date.text) - 5]
#if is_odd % 2 != 0:
#date_str = date_str[1:len(date_str)]
month = date_str[0:4]
#print(month)
if "Jan" in month:
month_num = 1
elif "Feb" in month:
month_num = 2
elif "Mar" in month:
month_num = 3
elif "Apr" in month:
month_num = 4
elif 'May' in month:
month_num = 5
elif 'Jun' in month:
month_num = 6
elif 'Jul' in month:
month_num = 7
elif 'Aug' in month:
month_num = 8
elif 'Sep' in month:
month_num = 9
elif 'Oct' in month:
month_num = 10
elif 'Nov' in month:
month_num = 11
else:
month_num = 12
print(month_num)
list_month.append(month_num)
#print(month_num)
#print(date_str)
date_nums = []
date_cleaned = date_str.replace('t', '')
date_cleaned = date_cleaned.replace('h', '')
date_cleaned = date_cleaned.replace('r', '')
date_cleaned = date_cleaned.replace('s', '')
date_cleaned = date_cleaned.replace('n', '')
date_cleaned = date_cleaned.replace('d', '')
for word in date_cleaned.split():
if word.isdigit():
date_nums.append(int(word))
list_days.append(date_nums[0])
list_year.append(date_nums[1])
is_odd+=1
#date_str = date_str.replace('t', '')
#date_str = date_str.replace('h', '')
list_dates.append(date_str)
list_len = len(list_dates)
#print(len(list_month))
#print(list_days)
#print(len(list_days))
#print(list_year)
#print(len(list_year))
#list_jackpots = soup.findAll("div", {"class": "jackpot"})
list_jackpots = []
for jackpot in soup.findAll("div", {"class": "jackpot"}):
jackpot_str = jackpot.text[31:len(jackpot.text) - 21]
jackpot_str = jackpot_str.replace('t', '')
jackpot_str = jackpot_str.replace('n', '')
jackpot_str = jackpot_str.replace('\'', '')
list_jackpots.append(jackpot_str)
#print(list_jackpots)
#print(list_jackpots)
#list_nums = soup.findAll("li", {"class": "ball ball"})
ball_num_list = []
for ball in soup.findAll("li", {"class": "ball ball"}):
ball_num = ball.text
ball_num_list.append(ball_num)
pwr_ball_num_list = []
for pwr_ball in soup.findAll("li", {"class": "ball powerball"}):
pwr_ball_num = pwr_ball.text[0:len(pwr_ball.text)-9]
pwr_ball_num_list.append(pwr_ball_num)
#print(len(pwr_ball_num_list))
WB1_list = ball_num_list[0:len(ball_num_list):5]
WB2_list = ball_num_list[1:len(ball_num_list):5]
WB3_list = ball_num_list[2:len(ball_num_list):5]
WB4_list = ball_num_list[3:len(ball_num_list):5]
WB5_list = ball_num_list[4:len(ball_num_list):5]
#print(PB_list)
#print(list_len)
#print(WB1_list)
#print(ball_num_list)
#for i in num_draws
for i in range(list_len):
day_place = 'B' + str(count)
month_place = 'C' + str(count)
year_place = 'D' + str(count)
WB1_place = 'E' + str(count)
WB2_place = 'F' + str(count)
WB3_place = 'G' + str(count)
WB4_place = 'H' + str(count)
WB5_place = 'I' + str(count)
PB_place = 'J' + str(count)
jackpot_place = 'K' + str(count)
ws[day_place] = list_days[i]
ws[month_place] = list_month[i]
ws[year_place] = list_year[i]
ws[WB1_place] = WB1_list[i]
ws[WB2_place] = WB2_list[i]
ws[WB3_place] = WB3_list[i]
ws[WB4_place] = WB4_list[i]
ws[WB5_place] = WB5_list[i]
ws[PB_place] = pwr_ball_num_list[i]
ws[jackpot_place] = list_jackpots[i]
count += 1
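# Note: the if/elif chain in get_page_info maps a month abbreviation to its number. The
# sketch below is an optional, more compact way to express the same mapping; it is not
# called by the scraper as written and the names are illustrative only.
MONTH_NUMBERS = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
                 'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
def month_to_number(month_text):
    # Return the 1-12 month number for text starting with a month abbreviation;
    # anything unmatched falls through to December, mirroring the original else branch.
    for abbreviation, number in MONTH_NUMBERS.items():
        if abbreviation in month_text:
            return number
    return 12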
def get_info():
options = webdriver.ChromeOptions()
options.add_argument("--start-maximized")
driver = webdriver.Chrome(executable_path=r'C:\Users\Michael\Desktop\Automate Application\chromedriver.exe', chrome_options=options)
get_page_info(driver)
wb.save('test_workbook.xlsx')
get_info()
| 27.991304 | 137 | 0.536036 |
6d3cba4796d4fc31908258729a6464beedfec6d1
| 481 |
py
|
Python
|
Views.py
|
SarankumarJ/serversideprocessing
|
3cde30613b361c88b71f91779c5001d8ad34a585
|
[
"BSD-3-Clause"
] | null | null | null |
Views.py
|
SarankumarJ/serversideprocessing
|
3cde30613b361c88b71f91779c5001d8ad34a585
|
[
"BSD-3-Clause"
] | null | null | null |
Views.py
|
SarankumarJ/serversideprocessing
|
3cde30613b361c88b71f91779c5001d8ad34a585
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
# Create your views here.
def areacalculation(request):
context = {}
context["area"] = "0"
context["l"] = "0"
context["b"] = "0"
if request.method == 'POST':
l= request.POST.get('length','0')
b= request.POST.get('breadth','0')
area = int(l) * int(b)
context["area"] = area
context["l"] = l
context["b"] = b
return render(request,'myapp/area.html',context)
| 28.294118 | 52 | 0.540541 |
39a15f889dc5784e1e360b19b3027bf53c22ff90
| 1,603 |
py
|
Python
|
interlink/management/commands/subscribe_members.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | 1 |
2019-08-15T00:10:38.000Z
|
2019-08-15T00:10:38.000Z
|
interlink/management/commands/subscribe_members.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | null | null | null |
interlink/management/commands/subscribe_members.py
|
czue/nadine
|
61fbfcac4d0c3159aa73500e47f4fa23c0aa9ef0
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
import time
import urllib.request, urllib.parse, urllib.error
import logging
import datetime
logger = logging.getLogger()
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from interlink.models import MailingList
class Command(BaseCommand):
help = "Subscribes every user with an active membership to a mailing list."
args = "[mailing-list-id]"
requires_system_checks = True
def print_usage(self):
print('./manage.py subscribe_members <mailing-list-id>')
def handle(self, *args, **options):
if len(args) != 1:
self.print_usage()
return
ml_id = args[0]
if not MailingList.objects.filter(pk=ml_id).exists():
            logger.error('Did not find mailing list with id %s' % ml_id)
return
mailing_list = MailingList.objects.get(pk=ml_id)
for user in User.helper.active_members():
mailing_list.subscribers.add(user)
# Copyright 2018 Office Nomads LLC (http://officenomads.name/) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
| 42.184211 | 580 | 0.724267 |
7f3ad456e16357d2a4a4b416c79731505a94744f
| 2,938 |
py
|
Python
|
libtbx/test_utils/python3_regression.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
libtbx/test_utils/python3_regression.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
libtbx/test_utils/python3_regression.py
|
hbrunie/cctbx_project
|
2d8cb383d50fe20cdbbe4bebae8ed35fabce61e5
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import os
def find_new_python3_incompatible_code(module_under_test):
'''
  Check source code to see if any files contain Python 3 syntax violations
  that were not previously present. Example call:
def test_find_python3_violations():
import xia2
import pytest
import libtbx.test_utils.python3_regression as py3test
result = py3test.find_new_python3_incompatible_code(xia2)
if result is None:
pytest.skip('No python3 interpreter available')
elif result:
pytest.fail(result)
Known violations are kept in file .known-python3-violations in the
module directory.
:param module_under_test: The imported module that should be tested.
This is the module object, not a string
containing the name of the module.
:return: False if the module contains no unexpected python 3 incompatible
code. Returns None if the test can't be run. This will typically
be due to a missing dependency such as the python 3 interpreter
or a required library. If unexpected python 3 incompatible code
is found a string containing a short summary is returned.
'''
# File containing list of excluded files
allowed_broken_files_list = '.known-python3-violations'
# Mask all *PYTHON* variables from environment - Python3 will not like cctbx python settings
environ_override = { k: '' for k in list(os.environ) if 'PYTHON' in k }
module_path = module_under_test.__path__[0]
try:
import procrunner
result = procrunner.run(['python3', '-m', 'compileall', '-x', '\.git', '-q', module_path], environment_override=environ_override, print_stdout=False)
except ImportError:
return None
except OSError as e:
if e.errno == 2:
return None
raise
if result['stderr']:
return 'Python3 compilation exited with unexpected STDERR output'
if not result['exitcode']: # No compilation errors
return False
errors = [x.replace(module_path + os.path.sep, '').strip() for x in result['stdout'].split('***')]
errors = filter(lambda x: "'" in x, errors)
broken_files = { error.split("'")[1]: error for error in errors }
exclusion_file = os.path.join(module_path, allowed_broken_files_list)
with open(exclusion_file + '.log', 'w') as fh:
fh.write("\n".join(sorted(broken_files)))
if os.path.exists(exclusion_file):
with open(exclusion_file, 'r') as fh:
excluded_files = fh.read().splitlines()
broken_files = { filename: broken_files[filename] for filename in broken_files if filename not in excluded_files }
if not broken_files: # No syntax violations in new files
return False
for filename in sorted(broken_files):
print(broken_files[filename], end="\n\n")
return "{} file[s] contain newly introduced Python3 syntax errors".format(len(broken_files))
| 39.173333 | 153 | 0.698775 |
d5e6ac9339df56186b115d94268a6d726afa1197
| 214 |
py
|
Python
|
src/myproject/wsgi.py
|
MrRightHand97/hoc_git2
|
867cb10b35860ed6b18cc4e2f00dda8e78ea84be
|
[
"MIT"
] | null | null | null |
src/myproject/wsgi.py
|
MrRightHand97/hoc_git2
|
867cb10b35860ed6b18cc4e2f00dda8e78ea84be
|
[
"MIT"
] | null | null | null |
src/myproject/wsgi.py
|
MrRightHand97/hoc_git2
|
867cb10b35860ed6b18cc4e2f00dda8e78ea84be
|
[
"MIT"
] | null | null | null |
"""
WSGI config for myproject project.
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myproject.settings')
application = get_wsgi_application()
| 17.833333 | 69 | 0.794393 |
87962c3cc759c94f26d54d518030ea81020dd96a
| 6,893 |
py
|
Python
|
Scripts/txt2ctf.py
|
Wootai/CNTK
|
5eca042341c8152594e67652a44c3b733a2acaa0
|
[
"RSA-MD"
] | 5 |
2017-08-28T08:27:18.000Z
|
2021-04-20T21:12:52.000Z
|
Scripts/txt2ctf.py
|
zhuyawen/CNTK
|
0ee09cf771bda9d4912790e0fed7322e89d86d87
|
[
"RSA-MD"
] | null | null | null |
Scripts/txt2ctf.py
|
zhuyawen/CNTK
|
0ee09cf771bda9d4912790e0fed7322e89d86d87
|
[
"RSA-MD"
] | 3 |
2019-08-23T11:42:14.000Z
|
2022-01-06T08:41:32.000Z
|
#!/usr/bin/env python
# This script takes a list of dictionary files and a plain text utf-8 file and converts this text input file to CNTK text format.
#
# The input text file must contain N streams per line (N TAB-separated "columns") and should be accompanied by N dictionary files.
# The input text file must be in the following form:
# text1 TAB text2 TAB ... TAB textN
# .....
# where each line represents one sequence across all N input streams.
# Each text consists of one or more space-separated word tokens (samples).
#
# Dictionary files are text files that are required to be specified for all streams,
# so the #dictionaries = #columns in the input file.
# A dictionary contains a single token per line. The zero-based line number becomes the numeric index
# of the token in the output CNTK text format file.
# Example usage (i.e. for PennTreebank files):
# 1)
# sed -e 's/^<\/s> //' -e 's/ <\/s>$//' < en.txt > en.txt1
# sed -e 's/^<\/s> //' -e 's/ <\/s>$//' < fr.txt > fr.txt1
# paste en.txt1 fr.txt1 | txt2ctf.py --map en.dict fr.dict > en-fr.ctf
#
# 2) (assuming that the current dir is [cntk root]/Examples/SequenceToSequence/CMUDict/Data/)
# sed -e 's/<s\/>/<\/s>\t<s>/' < cmudict-0.7b.train-dev-1-21.txt `#this will replace every '<s/>' with '</s>[tab]<s>'` |\
# python ../../../../Scripts/txt2ctf.py --map cmudict-0.7b.mapping cmudict-0.7b.mapping > cmudict-0.7b.train-dev-1-21.ctf
#
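# A tiny worked example (values assumed for illustration): with a single stream whose
# dictionary file contains the lines "hello" and "world" (indices 0 and 1), the input line
#   hello world
# is emitted as one CTF row per token, keyed by the zero-based sequence id:
#   0	|S0 0:1
#   0	|S0 1:1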
import sys
import argparse
import re
def convert(dictionaryStreams, inputs, output, unk, annotated):
# create in memory dictionaries
dictionaries = [{ line.rstrip('\r\n').strip():index for index, line in enumerate(dic) } for dic in dictionaryStreams]
# convert inputs
for input in inputs:
sequenceId = 0
for index, line in enumerate(input):
line = line.rstrip('\r\n')
columns = line.split("\t")
if len(columns) != len(dictionaries):
raise Exception("Number of dictionaries {0} does not correspond to the number of streams in line {1}:'{2}'"
.format(len(dictionaries), index, line))
_convertSequence(dictionaries, columns, sequenceId, output, unk, annotated)
sequenceId += 1
def _convertSequence(dictionaries, streams, sequenceId, output, unk, annotated):
tokensPerStream = [[t for t in s.strip(' ').split(' ') if t != ""] for s in streams]
maxLen = max(len(tokens) for tokens in tokensPerStream)
# writing to the output file
for sampleIndex in range(maxLen):
output.write(str(sequenceId))
for streamIndex in range(len(tokensPerStream)):
if len(tokensPerStream[streamIndex]) <= sampleIndex:
output.write("\t")
continue
token = tokensPerStream[streamIndex][sampleIndex]
if unk is not None and token not in dictionaries[streamIndex]: # try unk symbol if specified
token = unk
if token not in dictionaries[streamIndex]:
raise Exception("Token '{0}' cannot be found in the dictionary for stream {1}".format(token, streamIndex))
value = dictionaries[streamIndex][token]
output.write("\t|S" + str(streamIndex) + " "+ str(value) + ":1")
if annotated:
output.write(" |# " + re.sub(r'(\|(?!#))|(\|$)', r'|#', token))
output.write("\n")
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Transforms text file given dictionaries into CNTK text format.")
parser.add_argument('--map', help='List of dictionaries, given in the same order as streams in the input files',
nargs="+", required=True)
parser.add_argument('--annotated', help='Whether to annotate indices with tokens. Default is false',
choices=["True", "False"], default="False", required=False)
parser.add_argument('--output', help='Name of the output file, stdout if not given', default="", required=False)
parser.add_argument('--input', help='Name of the inputs files, stdin if not given', default="", nargs="*", required=False)
    parser.add_argument('--unk', help='Name of the fallback symbol for tokens not in the dictionary (same for all columns)', default=None, required=False)
args = parser.parse_args()
# creating inputs
inputs = [sys.stdin]
if len(args.input) != 0:
inputs = [open(i, encoding="utf-8") for i in args.input]
# creating output
output = sys.stdout
if args.output != "":
output = open(args.output, "w")
convert([open(d, encoding="utf-8") for d in args.map], inputs, output, args.unk, args.annotated == "True")
#####################################################################################################
# Tests
#####################################################################################################
try:
import StringIO
stringio = StringIO.StringIO
except ImportError:
from io import StringIO
stringio = StringIO
try:
import pytest
except ImportError:
pass
def test_simpleSanityCheck():
dictionary1 = stringio("hello\nmy\nworld\nof\nnothing\n")
dictionary2 = stringio("let\nme\nbe\nclear\nabout\nit\n")
input = stringio("hello my\tclear about\nworld of\tit let clear\n")
output = stringio()
convert([dictionary1, dictionary2], [input], output, None, False)
expectedOutput = stringio()
expectedOutput.write("0\t|S0 0:1\t|S1 3:1\n")
expectedOutput.write("0\t|S0 1:1\t|S1 4:1\n")
expectedOutput.write("1\t|S0 2:1\t|S1 5:1\n")
expectedOutput.write("1\t|S0 3:1\t|S1 0:1\n")
expectedOutput.write("1\t\t|S1 3:1\n")
assert expectedOutput.getvalue() == output.getvalue()
def test_thatPipeSymbolIsEscaped():
dictionary1 = stringio("|hello\nm|y\nworl|d\nof\nnothing|\n")
dictionary2 = stringio("let|\nm|e\nb|#e\nclear\n||about\ni||#t\n")
input = stringio("|hello m|y\tclear ||about\nworl|d of\ti||#t let| clear\n")
output = stringio()
convert([dictionary1, dictionary2], [input], output, None, True)
expectedOutput = stringio()
expectedOutput.write("0\t|S0 0:1 |# |#hello\t|S1 3:1 |# clear\n")
expectedOutput.write("0\t|S0 1:1 |# m|#y\t|S1 4:1 |# |#|#about\n")
expectedOutput.write("1\t|S0 2:1 |# worl|#d\t|S1 5:1 |# i|#|#t\n")
expectedOutput.write("1\t|S0 3:1 |# of\t|S1 0:1 |# let|#\n")
expectedOutput.write("1\t\t|S1 3:1 |# clear\n")
for x in zip(output.getvalue().split('\n'), expectedOutput.getvalue().split('\n')):
assert x[0] == x[1]
def test_nonExistingWord():
dictionary1 = stringio("hello\nmy\nworld\nof\nnothing\n")
input = stringio("hello my\nworld of nonexistent\n")
output = stringio()
with pytest.raises(Exception) as info:
convert([dictionary1], [input], output, None, False)
assert str(info.value) == "Token 'nonexistent' cannot be found in the dictionary for stream 0"
| 45.953333 | 143 | 0.631365 |
8ca4a2f361a8ce7b3083a6a3d4de23bda7e0abf0
| 20,495 |
py
|
Python
|
src/k8s-extension/azext_k8s_extension/vendored_sdks/v2021_03_01/aio/operations/_source_control_configurations_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1 |
2022-02-01T18:50:12.000Z
|
2022-02-01T18:50:12.000Z
|
src/k8s-extension/azext_k8s_extension/vendored_sdks/v2021_03_01/aio/operations/_source_control_configurations_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 9 |
2022-03-25T19:35:49.000Z
|
2022-03-31T06:09:47.000Z
|
src/k8s-extension/azext_k8s_extension/vendored_sdks/v2021_03_01/aio/operations/_source_control_configurations_operations.py
|
haroonf/azure-cli-extensions
|
61c044d34c224372f186934fa7c9313f1cd3a525
|
[
"MIT"
] | 1 |
2022-03-10T22:13:02.000Z
|
2022-03-10T22:13:02.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import functools
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._source_control_configurations_operations import build_create_or_update_request, build_delete_request_initial, build_get_request, build_list_request
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class SourceControlConfigurationsOperations:
"""SourceControlConfigurationsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.kubernetesconfiguration.v2021_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
@distributed_trace_async
async def get(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
source_control_configuration_name: str,
**kwargs: Any
) -> "_models.SourceControlConfiguration":
"""Gets details of the Source Control Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or
~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param source_control_configuration_name: Name of the Source Control Configuration.
:type source_control_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SourceControlConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.SourceControlConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceControlConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_get_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
source_control_configuration_name=source_control_configuration_name,
template_url=self.get.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('SourceControlConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}'} # type: ignore
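    # Illustrative usage sketch (not part of the generated client): how this async `get`
    # operation might be invoked. The client class name, credential and resource ids are
    # assumptions made for this example only.
    #
    #   from azure.identity.aio import DefaultAzureCredential
    #   from azure.mgmt.kubernetesconfiguration.v2021_03_01.aio import SourceControlConfigurationClient
    #
    #   async with SourceControlConfigurationClient(DefaultAzureCredential(), "<subscription-id>") as client:
    #       config = await client.source_control_configurations.get(
    #           resource_group_name="my-rg", cluster_rp="Microsoft.Kubernetes",
    #           cluster_resource_name="connectedClusters", cluster_name="my-cluster",
    #           source_control_configuration_name="my-config")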
@distributed_trace_async
async def create_or_update(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
source_control_configuration_name: str,
source_control_configuration: "_models.SourceControlConfiguration",
**kwargs: Any
) -> "_models.SourceControlConfiguration":
"""Create a new Kubernetes Source Control Configuration.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or
~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param source_control_configuration_name: Name of the Source Control Configuration.
:type source_control_configuration_name: str
:param source_control_configuration: Properties necessary to Create KubernetesConfiguration.
:type source_control_configuration:
~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.SourceControlConfiguration
:keyword callable cls: A custom type or function that will be passed the direct response
:return: SourceControlConfiguration, or the result of cls(response)
:rtype: ~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.SourceControlConfiguration
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceControlConfiguration"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
content_type = kwargs.pop('content_type', "application/json") # type: Optional[str]
_json = self._serialize.body(source_control_configuration, 'SourceControlConfiguration')
request = build_create_or_update_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
source_control_configuration_name=source_control_configuration_name,
content_type=content_type,
json=_json,
template_url=self.create_or_update.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 201]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if response.status_code == 200:
deserialized = self._deserialize('SourceControlConfiguration', pipeline_response)
if response.status_code == 201:
deserialized = self._deserialize('SourceControlConfiguration', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
source_control_configuration_name: str,
**kwargs: Any
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
request = build_delete_request_initial(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
source_control_configuration_name=source_control_configuration_name,
template_url=self._delete_initial.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}'} # type: ignore
@distributed_trace_async
async def begin_delete(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
source_control_configuration_name: str,
**kwargs: Any
) -> AsyncLROPoller[None]:
"""This will delete the YAML file used to set up the Source control configuration, thus stopping
future sync from the source repo.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or
~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:param source_control_configuration_name: Name of the Source Control Configuration.
:type source_control_configuration_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
this operation to not poll, or pass in your own initialized polling object for a personal
polling strategy.
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no
Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises: ~azure.core.exceptions.HttpResponseError
"""
polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
source_control_configuration_name=source_control_configuration_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations/{sourceControlConfigurationName}'} # type: ignore
@distributed_trace
def list(
self,
resource_group_name: str,
cluster_rp: Union[str, "_models.Enum0"],
cluster_resource_name: Union[str, "_models.Enum1"],
cluster_name: str,
**kwargs: Any
) -> AsyncIterable["_models.SourceControlConfigurationList"]:
"""List all Source Control Configurations.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param cluster_rp: The Kubernetes cluster RP - either Microsoft.ContainerService (for AKS
clusters) or Microsoft.Kubernetes (for OnPrem K8S clusters).
:type cluster_rp: str or ~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.Enum0
:param cluster_resource_name: The Kubernetes cluster resource name - either managedClusters
(for AKS clusters) or connectedClusters (for OnPrem K8S clusters).
:type cluster_resource_name: str or
~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.Enum1
:param cluster_name: The name of the kubernetes cluster.
:type cluster_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either SourceControlConfigurationList or the result of
cls(response)
:rtype:
~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.kubernetesconfiguration.v2021_03_01.models.SourceControlConfigurationList]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.SourceControlConfigurationList"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
def prepare_request(next_link=None):
if not next_link:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
template_url=self.list.metadata['url'],
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
else:
request = build_list_request(
subscription_id=self._config.subscription_id,
resource_group_name=resource_group_name,
cluster_rp=cluster_rp,
cluster_resource_name=cluster_resource_name,
cluster_name=cluster_name,
template_url=next_link,
)
request = _convert_request(request)
request.url = self._client.format_url(request.url)
request.method = "GET"
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize("SourceControlConfigurationList", pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{clusterRp}/{clusterResourceName}/{clusterName}/providers/Microsoft.KubernetesConfiguration/sourceControlConfigurations'} # type: ignore
| 50.730198 | 288 | 0.697926 |
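The async operations above (create_or_update, begin_delete, list) are normally reached through the package's aio client rather than instantiated directly. The sketch below shows one plausible way to drive them; the client class name, credential wiring, subscription id, and all resource names are assumptions for illustration, not taken from this file.

import asyncio

from azure.identity.aio import DefaultAzureCredential
from azure.mgmt.kubernetesconfiguration.v2021_03_01.aio import SourceControlConfigurationClient


async def main() -> None:
    # Placeholder credential and subscription id (assumed, not from the file above).
    credential = DefaultAzureCredential()
    async with SourceControlConfigurationClient(credential, "<subscription-id>") as client:
        # Page through every configuration on one connected cluster.
        configs = client.source_control_configurations.list(
            resource_group_name="my-rg",
            cluster_rp="Microsoft.Kubernetes",
            cluster_resource_name="connectedClusters",
            cluster_name="my-cluster",
        )
        async for config in configs:
            print(config.name)

        # begin_delete returns an AsyncLROPoller; result() waits for completion.
        poller = await client.source_control_configurations.begin_delete(
            resource_group_name="my-rg",
            cluster_rp="Microsoft.Kubernetes",
            cluster_resource_name="connectedClusters",
            cluster_name="my-cluster",
            source_control_configuration_name="my-config",
        )
        await poller.result()
    await credential.close()


asyncio.run(main())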
76114227dea45e2774c5ea58d11547cf90d1f39c
| 2,443 |
py
|
Python
|
toolkit/tamr_aligner/system/misc.py
|
shamy1997/HIT-SCIR-CoNLL2019
|
48a0a0429bae18968efaffbe6e5c97344e1d8ff1
|
[
"Apache-2.0"
] | 24 |
2019-10-07T12:38:00.000Z
|
2021-09-28T06:44:56.000Z
|
toolkit/tamr_aligner/system/misc.py
|
shamy1997/HIT-SCIR-CoNLL2019
|
48a0a0429bae18968efaffbe6e5c97344e1d8ff1
|
[
"Apache-2.0"
] | 13 |
2020-01-14T13:26:37.000Z
|
2020-09-21T11:35:11.000Z
|
toolkit/tamr_aligner/system/misc.py
|
shamy1997/HIT-SCIR-CoNLL2019
|
48a0a0429bae18968efaffbe6e5c97344e1d8ff1
|
[
"Apache-2.0"
] | 10 |
2019-10-09T07:14:05.000Z
|
2020-12-11T19:02:13.000Z
|
#!/usr/bin/env python
from __future__ import print_function
from __future__ import unicode_literals
import sys
from datetime import datetime
_DATE_FORMATS = {
'%y0000': (True, False, False),
'%y%m00': (True, True, False),
'%y%m%d': (True, True, True),
'%Y0000': (True, False, False),
'%Y%m00': (True, True, False),
'%d %B %Y': (True, True, True),
'%d %B': (True, True, False),
'%d %Y': (True, False, True),
'%Y%m%d': (True, True, True),
'%Y-%m-%d': (True, True, True),
'%m/%d': (False, True, True),
'%m/%d/%Y': (True, True, True),
'%m - %d - %Y': (True, True, True),
'%B %Y': (True, True, False),
'%B , %Y': (True, True, False),
'%B %d %Y': (True, True, True),
'%B %d , %Y': (True, True, True),
'%B %d': (False, True, True),
'%B %dst': (False, True, True),
'%B %dnd': (False, True, True),
'%B %drd': (False, True, True),
'%B %dth': (False, True, True),
'%B': (False, True, False),
'%Y': (True, False, False),
'%y': (True, False, False),
}
def parse_date(expression):
results = []
for format_ in _DATE_FORMATS:
try:
result = datetime.strptime(expression, format_)
results.append((result, _DATE_FORMATS[format_]))
except:
continue
results = list(filter(lambda result: 1900 <= result[0].year < 2100, results))
if len(results) > 1:
return results[0]
elif len(results) == 1:
return results[0]
else:
return None, (False, False, False)
def parse_all_dates(expression):
results = []
for format_ in _DATE_FORMATS:
try:
result = datetime.strptime(expression, format_)
results.append((result, _DATE_FORMATS[format_]))
except:
continue
results = list(filter(lambda r: 1900 <= r[0].year < 2100, results))
return results
def test():
for line in open(sys.argv[1], 'r'):
expression, fields = line.strip().split('|||')
expression = expression.strip()
# the expected-field list arrives as a single string (e.g. " year month day"),
# so split it into tokens first; iterating the raw string would only yield
# single characters and the assertions below would never fire
fields = fields.strip().split()
result = parse_date(expression)
slots = result[1]
for field in fields:
if field == 'year':
assert slots[0]
if field == 'month':
assert slots[1]
if field == 'day':
assert slots[2]
print('{0} ||| {1} ||| {2}'.format(expression, slots, fields), file=sys.stderr)
if __name__ == "__main__":
test()
| 29.792683 | 87 | 0.535407 |
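A quick usage sketch for the date helpers above. It assumes the file is importable as a module named misc; that name, like the sample strings, is only for illustration.

from misc import parse_date, parse_all_dates

# A fully specified date: the second element tells which slots were present.
date, (has_year, has_month, has_day) = parse_date('12 May 1999')
print(date.year, date.month, date.day)   # 1999 5 12
print(has_year, has_month, has_day)      # True True True

# A bare year only fills the year slot; month and day stay False.
date, slots = parse_date('2005')
print(date.year, slots)                  # 2005 (True, False, False)

# parse_all_dates keeps every format that matched, useful for ambiguous input.
for candidate, slots in parse_all_dates('20050000'):
    print(candidate.date(), slots)       # 2005-01-01 (True, False, False)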
58a31a0a5d2f788e2a08726a79686f629c7b6eca
| 8,925 |
py
|
Python
|
plugins/hdfs_assetstore/server/assetstore.py
|
data-exp-lab/girder
|
25e5847eaefec75f02c83f8d46aa55dcc59acb01
|
[
"Apache-2.0"
] | 1 |
2019-11-14T18:13:26.000Z
|
2019-11-14T18:13:26.000Z
|
plugins/hdfs_assetstore/server/assetstore.py
|
data-exp-lab/girder
|
25e5847eaefec75f02c83f8d46aa55dcc59acb01
|
[
"Apache-2.0"
] | 3 |
2018-11-15T19:52:40.000Z
|
2022-02-14T21:56:22.000Z
|
plugins/hdfs_assetstore/server/assetstore.py
|
data-exp-lab/girder
|
25e5847eaefec75f02c83f8d46aa55dcc59acb01
|
[
"Apache-2.0"
] | 3 |
2018-05-21T19:45:19.000Z
|
2019-04-08T19:53:07.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import os
import posixpath
import pwd
import requests
from snakebite.client import Client as HdfsClient
import uuid
from girder import logger
from girder.api.rest import setResponseHeader
from girder.exceptions import ValidationException
from girder.utility.abstract_assetstore_adapter import AbstractAssetstoreAdapter
class HdfsAssetstoreAdapter(AbstractAssetstoreAdapter):
def __init__(self, assetstore):
super(HdfsAssetstoreAdapter, self).__init__(assetstore)
self.client = self._getClient(self.assetstore)
@staticmethod
def _getHdfsUser(assetstore):
"""
If the given assetstore has an effective user specified, this returns
it. Otherwise returns the current user.
"""
return assetstore['hdfs'].get('user') or pwd.getpwuid(os.getuid())[0]
@staticmethod
def _getClient(assetstore):
return HdfsClient(
host=assetstore['hdfs']['host'], port=assetstore['hdfs']['port'],
use_trash=False,
effective_user=HdfsAssetstoreAdapter._getHdfsUser(assetstore)
)
def _absPath(self, doc):
"""
Return the absolute path in HDFS for a given file or upload.
:param doc: The file or upload document.
"""
return posixpath.join(
self.assetstore['hdfs']['path'], doc['hdfs']['path'])
@staticmethod
def validateInfo(doc):
"""
Ensures we have the necessary information to connect to HDFS instance,
and uses snakebite to actually connect to it.
"""
info = doc.get('hdfs', {})
for field in ('host', 'port', 'path', 'webHdfsPort', 'user'):
if field not in info:
raise ValidationException('Missing %s field.' % field)
if not info['webHdfsPort']:
info['webHdfsPort'] = 50070
try:
info['webHdfsPort'] = int(info['webHdfsPort'])
info['port'] = int(info['port'])
except ValueError:
raise ValidationException('Port values must be numeric.',
field='port')
try:
client = HdfsAssetstoreAdapter._getClient(doc)
client.serverdefaults()
except Exception:
raise ValidationException('Could not connect to HDFS at %s:%d.' %
(info['host'], info['port']))
# TODO test connection to webHDFS? Not now since it's not required
if not posixpath.isabs(info['path']):
raise ValidationException('Path must be absolute.', field='path')
if not client.test(info['path'], exists=True, directory=True):
res = client.mkdir([info['path']], create_parent=True).next()
if not res['result']:
raise ValidationException(res['error'], field='path')
return doc
def capacityInfo(self):
try:
info = self.client.df()
return {
'free': info['capacity'] - info['used'],
'total': info['capacity']
}
except Exception:
return {
'free': None,
'total': None
}
def downloadFile(self, file, offset=0, headers=True, endByte=None,
contentDisposition=None, extraParameters=None, **kwargs):
if endByte is None or endByte > file['size']:
endByte = file['size']
if headers:
setResponseHeader('Accept-Ranges', 'bytes')
self.setContentHeaders(file, offset, endByte, contentDisposition)
if file['hdfs'].get('imported'):
path = file['hdfs']['path']
else:
path = self._absPath(file)
def stream():
position = 0
fileStream = self.client.cat([path]).next()
shouldBreak = False
for chunk in fileStream:
chunkLen = len(chunk)
if position < offset:
if position + chunkLen > offset:
if position + chunkLen > endByte:
chunkLen = endByte - position
shouldBreak = True
yield chunk[offset - position:chunkLen]
else:
if position + chunkLen > endByte:
chunkLen = endByte - position
shouldBreak = True
yield chunk[:chunkLen]
position += chunkLen
if shouldBreak:
break
return stream
def deleteFile(self, file):
"""
Only deletes the file if it is managed (i.e. not an imported file).
"""
if not file['hdfs'].get('imported'):
res = self.client.delete([self._absPath(file)]).next()
if not res['result']:
raise Exception('Failed to delete HDFS file %s: %s' % (
res['path'], res.get('error')))
def initUpload(self, upload):
uid = uuid.uuid4().hex
relPath = posixpath.join(uid[0:2], uid[2:4], uid)
upload['hdfs'] = {
'path': relPath
}
absPath = self._absPath(upload)
parentDir = posixpath.dirname(absPath)
if not self.client.test(parentDir, exists=True, directory=True):
res = self.client.mkdir([posixpath.dirname(absPath)],
create_parent=True).next()
if not res['result']:
raise Exception(res['error'])
if self.client.test(absPath, exists=True):
raise Exception('File already exists: %s.' % absPath)
res = self.client.touchz([absPath]).next()
if not res['result']:
raise Exception(res['error'])
return upload
def uploadChunk(self, upload, chunk):
# For now, we use webhdfs when writing files since the process of
# implementing the append operation ourselves with protobuf is too
# expensive. If snakebite adds support for append in future releases,
# we should use that instead.
url = ('http://%s:%d/webhdfs/v1%s?op=APPEND&namenoderpcaddress=%s:%d'
'&user.name=%s')
url %= (
self.assetstore['hdfs']['host'],
self.assetstore['hdfs']['webHdfsPort'],
self._absPath(upload),
self.assetstore['hdfs']['host'],
self.assetstore['hdfs']['port'],
self._getHdfsUser(self.assetstore)
)
resp = requests.post(url, allow_redirects=False)
try:
resp.raise_for_status()
except Exception:
logger.exception('HDFS response: ' + resp.text)
raise Exception('Error appending to HDFS, see log for details.')
if resp.status_code != 307:
raise Exception('Expected 307 redirection to data node, instead '
'got %d: %s' % (resp.status_code, resp.text))
resp = requests.post(resp.headers['Location'], data=chunk)
chunk.close()
try:
resp.raise_for_status()
except Exception:
logger.exception('HDFS response: ' + resp.text)
raise Exception('Error appending to HDFS, see log for details.')
upload['received'] = self.requestOffset(upload)
return upload
def finalizeUpload(self, upload, file):
file['hdfs'] = upload['hdfs']
return file
def cancelUpload(self, upload):
absPath = self._absPath(upload)
if self.client.test(absPath, exists=True):
res = self.client.delete([absPath]).next()
if not res['result']:
raise Exception('Failed to delete HDFS file %s: %s' % (
res['path'], res.get('error')))
def requestOffset(self, upload):
return self.client.stat([self._absPath(upload)])['length']
| 35.7 | 80 | 0.561345 |
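The trickiest part of the adapter above is the byte-range slicing inside downloadFile's stream() generator. Below is a standalone sketch of the same slicing logic over an in-memory list of chunks, so it can be reasoned about without an HDFS connection; it is an illustration, not Girder API.

def sliced_stream(chunks, offset, end_byte):
    """Yield only the bytes in [offset, end_byte) from an iterable of chunks."""
    position = 0
    for chunk in chunks:
        chunk_len = len(chunk)
        if position + chunk_len <= offset:
            # Entire chunk lies before the requested range; skip it.
            position += chunk_len
            continue
        start = max(offset - position, 0)
        stop = min(end_byte - position, chunk_len)
        yield chunk[start:stop]
        position += chunk_len
        if position >= end_byte:
            break


data = [b'abcde', b'fghij', b'klmno']
assert b''.join(sliced_stream(data, offset=3, end_byte=12)) == b'defghijkl'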
b3e9af9a599f13497d4bf86bac6c14f6fd6413ad
| 6,691 |
py
|
Python
|
sabnzbd/articlecache.py
|
pcjacobse/sabnzbd
|
494e72a9963a1810e69f4e0f69df7c9dfb9256b0
|
[
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
sabnzbd/articlecache.py
|
pcjacobse/sabnzbd
|
494e72a9963a1810e69f4e0f69df7c9dfb9256b0
|
[
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
sabnzbd/articlecache.py
|
pcjacobse/sabnzbd
|
494e72a9963a1810e69f4e0f69df7c9dfb9256b0
|
[
"0BSD",
"PSF-2.0",
"BSD-2-Clause",
"MIT",
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/python -OO
# Copyright 2008-2017 The SABnzbd-Team <[email protected]>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""
sabnzbd.articlecache - Article cache handling
"""
import sys
import logging
import threading
import sabnzbd
from sabnzbd.decorators import synchronized
from sabnzbd.constants import GIGI, ANFO, Status
ARTICLE_LOCK = threading.Lock()
class ArticleCache(object):
do = None
def __init__(self):
self.__cache_limit_org = 0
self.__cache_limit = 0
self.__cache_size = 0
self.__article_list = [] # List of buffered articles
self.__article_table = {} # Dict of buffered articles
ArticleCache.do = self
@synchronized(ARTICLE_LOCK)
def cache_info(self):
return ANFO(len(self.__article_list), abs(self.__cache_size), self.__cache_limit_org)
@synchronized(ARTICLE_LOCK)
def new_limit(self, limit):
""" Called when cache limit changes """
self.__cache_limit_org = limit
if limit < 0:
self.__cache_limit = GIGI
else:
self.__cache_limit = min(limit, GIGI)
@synchronized(ARTICLE_LOCK)
def reserve_space(self, data):
""" Is there space left in the set limit? """
data_size = sys.getsizeof(data)*64
self.__cache_size += data_size
if self.__cache_size + data_size > self.__cache_limit:
return False
else:
return True
@synchronized(ARTICLE_LOCK)
def free_reserve_space(self, data):
""" Remove previously reserved space """
data_size = sys.getsizeof(data)*64
self.__cache_size -= data_size
return self.__cache_size + data_size < self.__cache_limit
@synchronized(ARTICLE_LOCK)
def save_article(self, article, data):
nzf = article.nzf
nzo = nzf.nzo
if nzo.is_gone():
# Do not discard this article because the
# file might still be processed at this moment!!
if sabnzbd.LOG_ALL:
logging.debug("%s is discarded", article)
return
saved_articles = article.nzf.nzo.saved_articles
if article not in saved_articles:
saved_articles.append(article)
if self.__cache_limit:
if self.__cache_limit < 0:
self.__add_to_cache(article, data)
else:
data_size = len(data)
while (self.__cache_size > (self.__cache_limit - data_size)) \
and self.__article_list:
# Flush oldest article in cache
old_article = self.__article_list.pop(0)
old_data = self.__article_table.pop(old_article)
self.__cache_size -= len(old_data)
# No need to flush if this is a refreshment article
if old_article != article:
self.__flush_article(old_article, old_data)
# Does our article fit into our limit now?
if (self.__cache_size + data_size) <= self.__cache_limit:
self.__add_to_cache(article, data)
else:
self.__flush_article(article, data)
else:
self.__flush_article(article, data)
@synchronized(ARTICLE_LOCK)
def load_article(self, article):
data = None
nzo = article.nzf.nzo
if article in self.__article_list:
data = self.__article_table.pop(article)
self.__article_list.remove(article)
self.__cache_size -= len(data)
if sabnzbd.LOG_ALL:
logging.debug("Loaded %s from cache", article)
elif article.art_id:
data = sabnzbd.load_data(article.art_id, nzo.workpath, remove=True,
do_pickle=False, silent=True)
if article in nzo.saved_articles:
nzo.remove_saved_article(article)
return data
@synchronized(ARTICLE_LOCK)
def flush_articles(self):
self.__cache_size = 0
while self.__article_list:
article = self.__article_list.pop(0)
data = self.__article_table.pop(article)
self.__flush_article(article, data)
@synchronized(ARTICLE_LOCK)
def purge_articles(self, articles):
if sabnzbd.LOG_ALL:
logging.debug("Purgable articles -> %s", articles)
for article in articles:
if article in self.__article_list:
self.__article_list.remove(article)
data = self.__article_table.pop(article)
self.__cache_size -= len(data)
if article.art_id:
sabnzbd.remove_data(article.art_id, article.nzf.nzo.workpath)
def __flush_article(self, article, data):
nzf = article.nzf
nzo = nzf.nzo
if nzo.is_gone():
# Do not discard this article because the
# file might still be processed at this moment!!
if sabnzbd.LOG_ALL:
logging.debug("%s is discarded", article)
return
art_id = article.get_art_id()
if art_id:
if sabnzbd.LOG_ALL:
logging.debug("Flushing %s to disk", article)
# Save data, but don't complain when destination folder is missing
# because this flush may come after completion of the NZO.
sabnzbd.save_data(data, art_id, nzo.workpath, do_pickle=False, silent=True)
else:
logging.warning("Flushing %s failed -> no art_id", article)
def __add_to_cache(self, article, data):
if article in self.__article_table:
self.__cache_size -= len(self.__article_table[article])
else:
self.__article_list.append(article)
self.__article_table[article] = data
self.__cache_size += len(data)
if sabnzbd.LOG_ALL:
logging.debug("Added %s to cache", article)
# Create the instance
ArticleCache()
| 34.312821 | 93 | 0.619937 |
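The core of save_article() above is a "flush the oldest entry until the new one fits" loop. The snippet below re-creates that eviction policy with plain dicts and byte strings so it can be tested in isolation; it is a sketch, not SABnzbd code.

def add_with_eviction(order, table, key, data, limit):
    """Insert data under key, evicting oldest entries until it fits within limit."""
    size = sum(len(v) for v in table.values())
    while order and size + len(data) > limit:
        oldest = order.pop(0)
        size -= len(table.pop(oldest))
    if size + len(data) <= limit:
        order.append(key)
        table[key] = data
        return True
    return False  # the item alone exceeds the whole cache limit


order, table = [], {}
for name in ('a', 'b', 'c'):
    add_with_eviction(order, table, name, b'x' * 40, limit=100)
print(order)  # ['b', 'c'] -- 'a' was flushed to make room for 'c'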
78d3cd75fbc148d0f5f79dc0122ac646f29d5cf1
| 2,898 |
py
|
Python
|
lasing.py
|
Marshblocker/lasing
|
6b664a568c12e2494c35aa4a13e981ffc55dd542
|
[
"MIT"
] | null | null | null |
lasing.py
|
Marshblocker/lasing
|
6b664a568c12e2494c35aa4a13e981ffc55dd542
|
[
"MIT"
] | null | null | null |
lasing.py
|
Marshblocker/lasing
|
6b664a568c12e2494c35aa4a13e981ffc55dd542
|
[
"MIT"
] | null | null | null |
from os import system
from time import sleep
from typing import NewType, TypedDict
import random
GRID_CHAR = '*'
BOARD_WIDTH = 80
BOARD_HEIGHT = 40
class Walker(TypedDict):
pos: list[int]
free: bool
Traversed = bool
BoardType = NewType("BoardType", list[list[Traversed]])
WalkersType = NewType("WalkersType", list[Walker])
def populate_board(board: BoardType, n: int) -> WalkersType:
walkers = WalkersType([{"pos": [0, 0], "free": True} for _ in range(n)])
for i in range(n):
while True:
r: int = random.randint(0, BOARD_HEIGHT - 1)
c: int = random.randint(0, BOARD_WIDTH - 1)
if not board[r][c]:
walkers[i]["pos"] = [r, c]
board[r][c] = True
break
return walkers
def check_and_update_board(direction: int, old_pos: list[int],
board: BoardType) -> tuple[bool, list[int]]:
occupied: bool = True
r, c = old_pos
match direction:
case 0: # UP
r += 1
case 1: # RIGHT
c += 1
case 2: # DOWN
r -= 1
case 3: # LEFT
c -= 1
if 0 <= r < BOARD_HEIGHT and 0 <= c < BOARD_WIDTH:  # include row/column 0, where walkers can legitimately be placed
occupied = board[r][c]
if not occupied:
board[r][c] = True
return (occupied, [r, c])
def clear_screen() -> None:
system('cls')
def print_board(board: BoardType, clear: bool = True) -> None:
board_str = "\n".join(["".join([GRID_CHAR if e else " " for e in row])
for row in board])
print(board_str)
if clear:
sleep(0.001)
clear_screen()
def main():
n = int(input("n: "))
board = BoardType([[False for _ in range(BOARD_WIDTH)]
for _ in range(BOARD_HEIGHT)])
walkers: WalkersType = populate_board(board, n)
stuck_walkers_count = 0
while stuck_walkers_count != n:
print_board(board)
for i in range(len(walkers)):
available_directions: list[int] = [0, 1, 2, 3]
if walkers[i]["free"]:
while len(available_directions):
direction: int = random.choice(available_directions)
occupied, new_pos = \
check_and_update_board(direction,
walkers[i]["pos"], board)
if occupied:
available_directions.remove(direction)
else:
walkers[i]["pos"] = new_pos
break
if not len(available_directions):
walkers[i]["free"] = False
stuck_walkers_count += 1
print_board(board, clear=False)
if __name__ == "__main__":
main()
| 27.6 | 77 | 0.503796 |
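A small usage sketch for the walker functions above. It assumes the script can be imported as a module named lasing (the __main__ guard makes that safe); the module name is an assumption for illustration.

import lasing

# Fresh empty board and a single randomly placed walker.
board = [[False] * lasing.BOARD_WIDTH for _ in range(lasing.BOARD_HEIGHT)]
walkers = lasing.populate_board(board, 1)

# Try to move the walker one step to the right (direction 1).
occupied, new_pos = lasing.check_and_update_board(1, walkers[0]["pos"], board)
print(walkers[0]["pos"], "->", new_pos, "(blocked)" if occupied else "(moved)")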
3092ec169a646e317504bfb1438b815c6dc1ccaf
| 9,761 |
py
|
Python
|
fanficfare/adapters/adapter_siyecouk.py
|
chocolatechipcats/FanFicFare
|
3874878e9548a250ceb672d88f579f02994f56cc
|
[
"Apache-2.0"
] | null | null | null |
fanficfare/adapters/adapter_siyecouk.py
|
chocolatechipcats/FanFicFare
|
3874878e9548a250ceb672d88f579f02994f56cc
|
[
"Apache-2.0"
] | null | null | null |
fanficfare/adapters/adapter_siyecouk.py
|
chocolatechipcats/FanFicFare
|
3874878e9548a250ceb672d88f579f02994f56cc
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2011 Fanficdownloader team, 2020 FanFicFare team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Software: eFiction
from __future__ import absolute_import
import logging
logger = logging.getLogger(__name__)
import re
from ..htmlcleanup import stripHTML
from .. import exceptions as exceptions
# py2 vs py3 transition
from .base_adapter import BaseSiteAdapter, makeDate
# This function is called by the downloader in all adapter_*.py files
# in this dir to register the adapter class. So it needs to be
# updated to reflect the class below it. That, plus getSiteDomain()
# take care of 'Registering'.
def getClass():
return SiyeCoUkAdapter # XXX
# Class name has to be unique. Our convention is camel case the
# sitename with Adapter at the end. www is skipped.
class SiyeCoUkAdapter(BaseSiteAdapter): # XXX
def __init__(self, config, url):
BaseSiteAdapter.__init__(self, config, url)
# get storyId from url--url validation guarantees query is only sid=1234
self.story.setMetadata('storyId',self.parsedUrl.query.split('=',)[1])
# normalized story URL.
self._setURL('https://' + self.getSiteDomain() + '/siye/viewstory.php?sid='+self.story.getMetadata('storyId'))
# Each adapter needs to have a unique site abbreviation.
self.story.setMetadata('siteabbrev','siye') # XXX
# The date format will vary from site to site.
# http://docs.python.org/library/datetime.html#strftime-strptime-behavior
self.dateformat = "%Y.%m.%d" # XXX
@staticmethod # must be @staticmethod, don't remove it.
def getSiteDomain():
# The site domain. Does have www here, if it uses it.
return 'www.siye.co.uk' # XXX
@classmethod
def getAcceptDomains(cls):
return ['www.siye.co.uk','siye.co.uk']
@classmethod
def getSiteExampleURLs(cls):
return "https://"+cls.getSiteDomain()+"/siye/viewstory.php?sid=1234"
def getSiteURLPattern(self):
return r"https?://(www\.)?siye\.co\.uk/(siye/)?"+re.escape("viewstory.php?sid=")+r"\d+$"
## Getting the chapter list and the meta data, plus 'is adult' checking.
def extractChapterUrlsAndMetadata(self):
# index=1 makes sure we see the story chapter index. Some
# sites skip that for one-chapter stories.
# Except it doesn't this time. :-/
url = self.url #+'&index=1'+addurl
logger.debug("URL: "+url)
data = self.get_request(url)
soup = self.make_soup(data)
# print data
# Find authorid and URL from... author url.
a = soup.find('a', href=re.compile(r"viewuser.php\?uid=\d+"))
if a is None:
raise exceptions.StoryDoesNotExist(self.url)
self.story.setMetadata('authorId',a['href'].split('=')[1])
self.story.setMetadata('authorUrl','https://'+self.host+'/siye/'+a['href'])
self.story.setMetadata('author',a.string)
# need(or easier) to pull other metadata from the author's list page.
authsoup = self.make_soup(self.get_request(self.story.getMetadata('authorUrl')))
# remove author profile incase they've put the story URL in their bio.
profile = authsoup.find('div',{'id':'profile'})
if profile: # in case it changes.
profile.extract()
## Title
titlea = authsoup.find('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+"$"))
self.story.setMetadata('title',stripHTML(titlea))
# Find the chapters (from soup, not authsoup):
for chapter in soup.findAll('a', href=re.compile(r'viewstory.php\?sid='+self.story.getMetadata('storyId')+r"&chapter=\d+$")):
# just in case there's tags, like <i> in chapter titles.
self.add_chapter(chapter,'https://'+self.host+'/siye/'+chapter['href'])
if self.num_chapters() < 1:
self.add_chapter(self.story.getMetadata('title'),url)
# The stuff we can get from the chapter list/one-shot page are
# in the first table with 95% width.
metatable = soup.find('table',{'width':'95%'})
# Categories
cat_as = metatable.findAll('a', href=re.compile(r'categories.php'))
for cat_a in cat_as:
self.story.addToList('category',stripHTML(cat_a))
for label in metatable.find_all('b'):
# html5lib doesn't give me \n for <br> anymore.
# I expect there's a better way, but this is what came to
# mind today. -JM
part = stripHTML(label)
nxtbr = label.find_next_sibling('br')
nxtsib = label.next_sibling
value = ""
while nxtsib != nxtbr:
value += stripHTML(nxtsib)
nxtsib = nxtsib.next_sibling
# logger.debug("label:%s value:%s"%(part,value))
if part.startswith("Characters:"):
for item in value.split(', '):
if item == "Harry/Ginny":
self.story.addToList('characters',"Harry Potter")
self.story.addToList('characters',"Ginny Weasley")
elif item not in ("None","All"):
self.story.addToList('characters',item)
if part.startswith("Genres:"):
self.story.extendList('genre',value.split(', '))
if part.startswith("Warnings:"):
if value != "None":
self.story.extendList('warnings',value.split(', '))
if part.startswith("Rating:"):
self.story.setMetadata('rating',value)
if part.startswith("Summary:"):
# summary can include extra br and b tags go until Hitcount
summary = ""
nxt = label.next_sibling
while nxt and "Hitcount:" not in stripHTML(nxt):
summary += "%s"%nxt
# logger.debug(summary)
nxt = nxt.next_sibling
if summary.strip().endswith("<br/>"):
summary = summary.strip()[0:-len("<br/>")]
self.setDescription(url,summary)
# Stuff from author block:
# SIYE formats stories in the author list differently when
# their part of a series. Look for non-series...
divdesc = titlea.parent.parent.find('div',{'class':'desc'})
if not divdesc:
# ... now look for series.
divdesc = titlea.parent.parent.findNextSibling('tr').find('div',{'class':'desc'})
moremeta = stripHTML(divdesc)
# logger.debug("moremeta:%s"%moremeta)
# html5lib doesn't give me \n for <br> anymore.
for part in moremeta.replace(' - ','\n').replace("Completed","\nCompleted").split('\n'):
# logger.debug("part:%s"%part)
try:
(name,value) = part.split(': ')
except:
# not going to worry about fancier processing for the bits
# that don't match.
continue
name=name.strip()
value=value.strip()
if name == 'Published':
self.story.setMetadata('datePublished', makeDate(value, self.dateformat))
if name == 'Updated':
self.story.setMetadata('dateUpdated', makeDate(value, self.dateformat))
if name == 'Completed':
if value == 'Yes':
self.story.setMetadata('status', 'Completed')
else:
self.story.setMetadata('status', 'In-Progress')
if name == 'Words':
self.story.setMetadata('numWords', value)
try:
# Find Series name from series URL.
a = titlea.findPrevious('a', href=re.compile(r"series.php\?seriesid=\d+"))
series_name = a.string
series_url = 'https://'+self.host+'/'+a['href']
seriessoup = self.make_soup(self.get_request(series_url))
storyas = seriessoup.findAll('a', href=re.compile(r'^viewstory.php\?sid=\d+$'))
i=1
for a in storyas:
if a['href'] == ('viewstory.php?sid='+self.story.getMetadata('storyId')):
self.setSeries(series_name, i)
self.story.setMetadata('seriesUrl',series_url)
break
i+=1
except:
# I find it hard to care if the series parsing fails
pass
# grab the text for an individual chapter.
def getChapterText(self, url):
logger.debug('Getting chapter text from: %s' % url)
# soup = self.make_soup(self.get_request(url))
# BeautifulSoup objects to <p> inside <span>, which
# technically isn't allowed.
soup = self.make_soup(self.get_request(url))
# not the most unique thing in the world, but it appears to be
# the best we can do here.
story = soup.find('span', {'style' : 'font-size: 100%;'})
if None == story:
raise exceptions.FailedToDownload("Error downloading Chapter: %s! Missing required element!" % url)
story.name='div'
return self.utf8FromSoup(url,story)
| 40.334711 | 133 | 0.594304 |
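The adapter above identifies a story purely from the sid query parameter. The standalone snippet below mirrors that URL validation and id extraction with the same pattern; the example URL is made up.

import re
from urllib.parse import urlparse

url = 'https://www.siye.co.uk/siye/viewstory.php?sid=1234'

# Same pattern the adapter returns from getSiteURLPattern().
pattern = r"https?://(www\.)?siye\.co\.uk/(siye/)?" + re.escape("viewstory.php?sid=") + r"\d+$"
assert re.match(pattern, url)

# Same extraction used in __init__: everything after '=' in the query string.
story_id = urlparse(url).query.split('=')[1]
print(story_id)  # 1234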
a57a8fbed9930e7770c694430f28667ab642602c
| 10,255 |
py
|
Python
|
challenge.py
|
Melody-Lii/Movies-ETL
|
cf2568ac3dac8d82286d9dfab5a01a3c059ec24e
|
[
"MIT"
] | null | null | null |
challenge.py
|
Melody-Lii/Movies-ETL
|
cf2568ac3dac8d82286d9dfab5a01a3c059ec24e
|
[
"MIT"
] | null | null | null |
challenge.py
|
Melody-Lii/Movies-ETL
|
cf2568ac3dac8d82286d9dfab5a01a3c059ec24e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding: utf-8
# In[13]:
import json
import pandas as pd
import numpy as np
import time
import re
from sqlalchemy import create_engine
import psycopg2
from config import db_password
def clean_movie(movie):
movie = dict(movie) #create a non-destructive copy
alt_titles = {}
# combine alternate titles into one list
for key in ['Also known as','Arabic','Cantonese','Chinese','French',
'Hangul','Hebrew','Hepburn','Japanese','Literally',
'Mandarin','McCune-Reischauer','Original title','Polish',
'Revised Romanization','Romanized','Russian',
'Simplified','Traditional','Yiddish']:
if key in movie:
alt_titles[key] = movie[key]
movie.pop(key)
if len(alt_titles) > 0:
movie['alt_titles'] = alt_titles
# merge column names
def change_column_name(old_name, new_name):
if old_name in movie:
movie[new_name] = movie.pop(old_name)
change_column_name('Adaptation by', 'Writer(s)')
change_column_name('Country of origin', 'Country')
change_column_name('Directed by', 'Director')
change_column_name('Distributed by', 'Distributor')
change_column_name('Edited by', 'Editor(s)')
change_column_name('Length', 'Running time')
change_column_name('Original release', 'Release date')
change_column_name('Music by', 'Composer(s)')
change_column_name('Produced by', 'Producer(s)')
change_column_name('Producer', 'Producer(s)')
change_column_name('Productioncompanies ', 'Production company(s)')
change_column_name('Productioncompany ', 'Production company(s)')
change_column_name('Released', 'Release Date')
change_column_name('Release Date', 'Release date')
change_column_name('Screen story by', 'Writer(s)')
change_column_name('Screenplay by', 'Writer(s)')
change_column_name('Story by', 'Writer(s)')
change_column_name('Theme music composer', 'Composer(s)')
change_column_name('Written by', 'Writer(s)')
return movie
def extract_transform_load(wiki_file, kaggle_file, ratings_file):
with open(wiki_file, mode='r') as file:
wiki_movies_raw = json.load(file)
kaggle_metadata = pd.read_csv(kaggle_file)
ratings = pd.read_csv(ratings_file)
wiki_movies = [movie for movie in wiki_movies_raw
if ('Director' in movie or 'Directed by' in movie)
and 'imdb_link' in movie]
clean_movies = [clean_movie(movie) for movie in wiki_movies]
wiki_movies_df = pd.DataFrame(clean_movies)
# Assuming wikipedia data still contains IMDB id
try:
wiki_movies_df['imdb_id'] = wiki_movies_df['imdb_link'].str.extract(r'(tt\d{7})')
wiki_movies_df.drop_duplicates(subset='imdb_id', inplace=True)
except Exception as e:
print(e)
wiki_columns_to_keep = [column for column in wiki_movies_df.columns if wiki_movies_df[column].isnull().sum() < len(wiki_movies_df) * 0.9]
wiki_movies_df = wiki_movies_df[wiki_columns_to_keep]
box_office = wiki_movies_df['Box office'].dropna()
box_office = box_office.apply(lambda x: ' '.join(x) if type(x) == list else x)
form_one = r'\$\d+\.?\d*\s*[mb]illion'
form_two = r'\$\d{1,3}(?:,\d{3})+'
def parse_dollars(s):
# if s is not a string, return NaN
if type(s) != str:
return np.nan
# if input is of the form $###.# million
if re.match(r'\$\s*\d+\.?\d*\s*milli?on', s, flags=re.IGNORECASE):
# remove dollar sign and " million"
s = re.sub(r'\$|\s|[a-zA-Z]','', s)
# convert to float and multiply by a million
value = float(s) * 10**6
# return value
return value
# if input is of the form $###.# billion
elif re.match(r'\$\s*\d+\.?\d*\s*billi?on', s, flags=re.IGNORECASE):
# remove dollar sign and " billion"
s = re.sub(r'\$|\s|[a-zA-Z]','', s)
# convert to float and multiply by a billion
value = float(s) * 10**9
# return value
return value
# if input is of the form $###,###,###
elif re.match(r'\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illion)', s, flags=re.IGNORECASE):
# remove dollar sign and commas
s = re.sub('\$|,','', s)
# convert to float
value = float(s)
# return value
return value
# otherwise, return NaN
else:
return np.nan
wiki_movies_df['box_office'] = box_office.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
wiki_movies_df.drop('Box office', axis=1, inplace=True)
budget = wiki_movies_df['Budget'].dropna()
budget = budget.map(lambda x: ' '.join(x) if type(x) == list else x)
budget = budget.str.replace(r'\$.*[-—–](?![a-z])', '$', regex=True)
wiki_movies_df['budget'] = budget.str.extract(f'({form_one}|{form_two})', flags=re.IGNORECASE)[0].apply(parse_dollars)
wiki_movies_df.drop('Budget', axis=1, inplace=True)
release_date = wiki_movies_df['Release date'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)
date_form_one = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\s[123]\d,\s\d{4}'
date_form_two = r'\d{4}.[01]\d.[123]\d'
date_form_three = r'(?:January|February|March|April|May|June|July|August|September|October|November|December)\s\d{4}'
date_form_four = r'\d{4}'
wiki_movies_df['release_date'] = pd.to_datetime(release_date.str.extract(f'({date_form_one}|{date_form_two}|{date_form_three}|{date_form_four})')[0], infer_datetime_format=True)
running_time = wiki_movies_df['Running time'].dropna().apply(lambda x: ' '.join(x) if type(x) == list else x)
running_time_extract = running_time.str.extract(r'(\d+)\s*ho?u?r?s?\s*(\d*)|(\d+)\s*m')
running_time_extract = running_time_extract.apply(lambda col: pd.to_numeric(col, errors='coerce')).fillna(0)
wiki_movies_df['running_time'] = running_time_extract.apply(lambda row: row[0]*60 + row[1] if row[2] == 0 else row[2], axis=1)
running_time[running_time.str.contains(r'^\d*\s*m', flags=re.IGNORECASE) != True]
kaggle_metadata = kaggle_metadata[kaggle_metadata['adult'] == 'False'].drop('adult',axis='columns')
kaggle_metadata['video'] = kaggle_metadata['video'] == 'True'
kaggle_metadata['budget'] = kaggle_metadata['budget'].astype(int)
kaggle_metadata['id'] = pd.to_numeric(kaggle_metadata['id'], errors='raise')
kaggle_metadata['popularity'] = pd.to_numeric(kaggle_metadata['popularity'], errors='raise')
kaggle_metadata['release_date'] = pd.to_datetime(kaggle_metadata['release_date'])
movies_df = pd.merge(wiki_movies_df, kaggle_metadata, on='imdb_id', suffixes=['_wiki','_kaggle'])
movies_df.drop(columns=['title_wiki','release_date_wiki','Language','Production company(s)'], inplace=True)
def fill_missing_kaggle_data(df, kaggle_column, wiki_column):
df[kaggle_column] = df.apply(
lambda row: row[wiki_column] if row[kaggle_column] == 0 else row[kaggle_column]
, axis=1)
df.drop(columns=wiki_column, inplace=True)
fill_missing_kaggle_data(movies_df, 'runtime', 'running_time')
fill_missing_kaggle_data(movies_df, 'budget_kaggle', 'budget_wiki')
fill_missing_kaggle_data(movies_df, 'revenue', 'box_office')
movies_df = movies_df.loc[:, ['imdb_id','id','title_kaggle','original_title','tagline','belongs_to_collection','url','imdb_link',
'runtime','budget_kaggle','revenue','release_date_kaggle','popularity','vote_average','vote_count',
'genres','original_language','overview','spoken_languages','Country',
'production_companies','production_countries','Distributor',
'Producer(s)','Director','Starring','Cinematography','Editor(s)','Writer(s)','Composer(s)','Based on'
]]
movies_df.rename({'id':'kaggle_id',
'title_kaggle':'title',
'url':'wikipedia_url',
'budget_kaggle':'budget',
'release_date_kaggle':'release_date',
'Country':'country',
'Distributor':'distributor',
'Producer(s)':'producers',
'Director':'director',
'Starring':'starring',
'Cinematography':'cinematography',
'Editor(s)':'editors',
'Writer(s)':'writers',
'Composer(s)':'composers',
'Based on':'based_on'
}, axis='columns', inplace=True)
rating_counts = ratings.groupby(['movieId','rating'], as_index=False).count().rename({'userId':'count'}, axis=1).pivot(index='movieId',columns='rating', values='count')
rating_counts.columns = ['rating_' + str(col) for col in rating_counts.columns]
movies_with_ratings_df = pd.merge(movies_df, rating_counts, left_on='kaggle_id', right_index=True, how='left')
movies_with_ratings_df[rating_counts.columns] = movies_with_ratings_df[rating_counts.columns].fillna(0)
db_string = f"postgres://postgres:{db_password}@localhost:5433/movie_data"
engine = create_engine(db_string)
movies_df.to_sql(name='movies', con=engine, if_exists = 'append')
rows_imported = 0
# get the start_time from time.time()
start_time = time.time()
for data in pd.read_csv(ratings_file, chunksize=1000000):  # use the ratings_file argument rather than the module-level file_dir
print(f'importing rows {rows_imported} to {rows_imported + len(data)}...', end='')
data.to_sql(name='ratings', con=engine, if_exists='append')
rows_imported += len(data)
# add elapsed time to final print out
print(f'Done. {time.time() - start_time} total seconds elapsed')
file_dir = "./Resources"
wiki_file = f'{file_dir}/wikipedia.movies.json'
kaggle_file = f'{file_dir}/movies_metadata.csv'
ratings_file = f'{file_dir}/ratings.csv'
extract_transform_load(wiki_file, kaggle_file, ratings_file)
# In[ ]:
| 42.028689 | 181 | 0.633642 |
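parse_dollars() above is defined inside extract_transform_load(), so it cannot be exercised without loading the full datasets. Here is a standalone re-creation of the same three regex branches for quick sanity checks; it is a sketch mirroring the logic above, not a replacement for it.

import re
import numpy as np


def parse_dollars(s):
    if type(s) != str:
        return np.nan
    # "$###.# million" -> scale by 1e6
    if re.match(r'\$\s*\d+\.?\d*\s*milli?on', s, flags=re.IGNORECASE):
        return float(re.sub(r'\$|\s|[a-zA-Z]', '', s)) * 10**6
    # "$###.# billion" -> scale by 1e9
    if re.match(r'\$\s*\d+\.?\d*\s*billi?on', s, flags=re.IGNORECASE):
        return float(re.sub(r'\$|\s|[a-zA-Z]', '', s)) * 10**9
    # "$###,###,###" -> strip separators
    if re.match(r'\$\s*\d{1,3}(?:[,\.]\d{3})+(?!\s[mb]illion)', s, flags=re.IGNORECASE):
        return float(re.sub(r'\$|,', '', s))
    return np.nan


print(parse_dollars('$21.5 million'))  # 21500000.0
print(parse_dollars('$1,234,567'))     # 1234567.0
print(parse_dollars('unknown'))        # nan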
1847a8c4ef679740afba8ec8ca86b5d21d5f3e94
| 808 |
py
|
Python
|
haystack_test/config/urls.py
|
salmanwahed/haystack-test-project
|
2fa0b4c0151456637099e81d3394dde800df79e9
|
[
"Apache-2.0"
] | null | null | null |
haystack_test/config/urls.py
|
salmanwahed/haystack-test-project
|
2fa0b4c0151456637099e81d3394dde800df79e9
|
[
"Apache-2.0"
] | null | null | null |
haystack_test/config/urls.py
|
salmanwahed/haystack-test-project
|
2fa0b4c0151456637099e81d3394dde800df79e9
|
[
"Apache-2.0"
] | null | null | null |
"""haystack_test URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
from django.conf.urls import include, url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', include('noticeboard.urls')),
]
| 35.130435 | 77 | 0.700495 |
2aaa1595540aaf1082de4ecf1dbc51c11518ce65
| 1,306 |
py
|
Python
|
examples/gaussian_mixture.py
|
gmourier/MLAlgorithms
|
b6d1489cef7ebaa5603cec8564e0e3543419669b
|
[
"MIT"
] | null | null | null |
examples/gaussian_mixture.py
|
gmourier/MLAlgorithms
|
b6d1489cef7ebaa5603cec8564e0e3543419669b
|
[
"MIT"
] | null | null | null |
examples/gaussian_mixture.py
|
gmourier/MLAlgorithms
|
b6d1489cef7ebaa5603cec8564e0e3543419669b
|
[
"MIT"
] | 1 |
2022-02-15T21:30:18.000Z
|
2022-02-15T21:30:18.000Z
|
import random
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from mla.kmeans import KMeans
from mla.gaussian_mixture import GaussianMixture
random.seed(1)
np.random.seed(6)
def make_clusters(skew=True, *arg, **kwargs):
X, y = datasets.make_blobs(*arg, **kwargs)
if skew:
nrow = X.shape[1]
for i in np.unique(y):
X[y == i] = X[y == i].dot(np.random.random((nrow, nrow)) - 0.5)
return X, y
def KMeans_and_GMM(K):
COLOR = 'bgrcmyk'
X, y = make_clusters(skew=True, n_samples=1500, centers=K)
_, axes = plt.subplots(1, 3)
# Ground Truth
axes[0].scatter(X[:, 0], X[:, 1], c=[COLOR[int(assignment)] for assignment in y])
axes[0].set_title("Ground Truth")
# KMeans
kmeans = KMeans(K=K, init='++')
kmeans.fit(X)
y_kmeans = kmeans.predict()
c_kmeans = np.array(kmeans.centroids)
axes[1].scatter(X[:, 0], X[:, 1], c=[COLOR[int(assignment)] for assignment in y_kmeans])
axes[1].scatter(c_kmeans[:, 0], c_kmeans[:, 1], c=COLOR[:K], marker="o", s=500)
axes[1].set_title("KMeans")
# Gaussian Mixture
gmm = GaussianMixture(K=K, init='kmeans')
gmm.fit(X)
axes[2].set_title("Gaussian Mixture")
gmm.plot(ax=axes[2])
if __name__ == "__main__":
KMeans_and_GMM(4)
| 26.653061 | 92 | 0.6317 |
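The script above is already a self-contained demo of the mla package. For readers without that package installed, the sketch below runs an equivalent cross-check with scikit-learn on the same kind of blob data; the parameter choices here are assumptions, not taken from the script.

import numpy as np
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture

X, y = make_blobs(n_samples=1500, centers=4, random_state=6)
labels_km = KMeans(n_clusters=4, init='k-means++', n_init=10, random_state=1).fit_predict(X)
labels_gmm = GaussianMixture(n_components=4, random_state=1).fit_predict(X)
print(np.bincount(labels_km), np.bincount(labels_gmm))  # cluster sizes from each model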
42419702eee24bfa046a2a4461b152965a1f24ea
| 2,220 |
py
|
Python
|
tools/aicity20/vis_result.py
|
Johere/AICity2020-VOC-ReID
|
21268535595c8c90b87cd1ee89ddbcb341a86d76
|
[
"MIT"
] | 100 |
2020-04-25T03:58:01.000Z
|
2022-03-30T18:24:17.000Z
|
tools/aicity20/vis_result.py
|
hanleiyu/prcv
|
df5ad9469b38b8176121357fe5de2b1cf30aae1c
|
[
"MIT"
] | 30 |
2020-04-27T07:15:00.000Z
|
2022-01-03T19:49:49.000Z
|
tools/aicity20/vis_result.py
|
hanleiyu/prcv
|
df5ad9469b38b8176121357fe5de2b1cf30aae1c
|
[
"MIT"
] | 25 |
2020-04-25T22:53:30.000Z
|
2022-03-28T00:46:51.000Z
|
import numpy as np
import cv2
import os
import sys
sys.path.append('.')
from lib.data.datasets.aicity20_trainval import AICity20Trainval
def visualize_submit(dataset, out_dir, submit_txt_path, topk=5):
query_dir = dataset.query_dir
gallery_dir = dataset.gallery_dir
vis_size = (256, 256)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
results = []
with open(submit_txt_path, 'r') as f:
lines = f.readlines()
for line in lines:
line = line.strip()
results.append(line.split(' '))
query_pids = [pid for _, pid, _ in dataset.query]
img_to_pid = {}
for img_path, pid, _ in dataset.gallery:
name = os.path.basename(img_path)
img_to_pid[name] = pid
for i, result in enumerate(results):
is_False = False
# query_path = os.path.join(query_dir, str(i+1).zfill(6)+'.jpg')
query_path = os.path.join(query_dir, os.path.basename(dataset.query[i][0]))
gallery_paths = []
for name in result:
# gallery_paths.append(os.path.join(gallery_dir, index.zfill(6)+'.jpg'))
gallery_paths.append(os.path.join(gallery_dir, name))
imgs = []
imgs.append(cv2.resize(cv2.imread(query_path), vis_size))
for n in range(topk):
img = cv2.resize(cv2.imread(gallery_paths[n]), vis_size)
if query_pids[i] != img_to_pid[result[n]]:
img = cv2.rectangle(img, (0, 0), vis_size, (0, 0, 255), 2)
is_False = True
imgs.append(img)
canvas = np.concatenate(imgs, axis=1)
#if is_False:
cv2.imwrite(os.path.join(out_dir, os.path.basename(query_path)), canvas)
if __name__ == '__main__':
# dataset_dir = '/home/xiangyuzhu/data/ReID/AIC20_ReID'
dataset = AICity20Trainval(root='/home/zxy/data/ReID/vehicle')
#
# dataset_dir = '/home/zxy/data/ReID/vehicle/AIC20_ReID_Cropped'
# query_dir = os.path.join(dataset_dir, 'image_query')
# gallery_dir = os.path.join(dataset_dir, 'image_test')
out_dir = 'vis/'
submit_txt_path = './output/aicity20/experiments/circle-sim-aug/result_voc.txt'
visualize_submit(dataset, out_dir, submit_txt_path)
| 35.238095 | 84 | 0.636937 |
55fe123b03c8b885e313e01741da048dccfd9e5f
| 700 |
py
|
Python
|
migrations/versions/0a63dc36c3b2_add_column_pass_secure_for_storing_.py
|
carolwanjohi/watchlist
|
ae15964bb272b834b57e6856bcdd4f9b8ce1d2a6
|
[
"MIT"
] | null | null | null |
migrations/versions/0a63dc36c3b2_add_column_pass_secure_for_storing_.py
|
carolwanjohi/watchlist
|
ae15964bb272b834b57e6856bcdd4f9b8ce1d2a6
|
[
"MIT"
] | null | null | null |
migrations/versions/0a63dc36c3b2_add_column_pass_secure_for_storing_.py
|
carolwanjohi/watchlist
|
ae15964bb272b834b57e6856bcdd4f9b8ce1d2a6
|
[
"MIT"
] | null | null | null |
"""Add column pass_secure for storing passwords
Revision ID: 0a63dc36c3b2
Revises: e34c84c48c61
Create Date: 2017-10-23 16:38:02.302281
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0a63dc36c3b2'
down_revision = 'e34c84c48c61'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('users', sa.Column('pass_secure', sa.String(length=255), nullable=True))
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_column('users', 'pass_secure')
# ### end Alembic commands ###
| 24.137931 | 90 | 0.704286 |
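For context, the column added by the migration above would correspond to a model field roughly like the sketch below (Flask-SQLAlchemy style). The table name and column come from the migration; the model class and the other fields are assumptions.

from flask_sqlalchemy import SQLAlchemy

db = SQLAlchemy()


class User(db.Model):
    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    # Column introduced by revision 0a63dc36c3b2 above.
    pass_secure = db.Column(db.String(255))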
706897bd914e8bfd349f17d449f8fbbec7a79000
| 5,964 |
py
|
Python
|
graph_embedding/dmon/utilities/graph.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 23,901 |
2018-10-04T19:48:53.000Z
|
2022-03-31T21:27:42.000Z
|
graph_embedding/dmon/utilities/graph.py
|
DionysisChristopoulos/google-research
|
7f59ef421beef32ca16c2a7215be74f7eba01a0f
|
[
"Apache-2.0"
] | 891 |
2018-11-10T06:16:13.000Z
|
2022-03-31T10:42:34.000Z
|
graph_embedding/dmon/utilities/graph.py
|
admariner/google-research
|
7cee4b22b925581d912e8d993625c180da2a5a4f
|
[
"Apache-2.0"
] | 6,047 |
2018-10-12T06:31:02.000Z
|
2022-03-31T13:59:28.000Z
|
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Graph construction utility functions.
Functions for graph manipulation and creation.
TODO(tsitsulin): add headers, tests, and improve style.
"""
import pickle
import sys
import networkx as nx
import numpy as np
import scipy.sparse
from scipy.sparse import csr_matrix
from scipy.sparse import lil_matrix
from scipy.sparse.base import spmatrix
from sklearn.neighbors import kneighbors_graph
import tensorflow as tf
def construct_knn_graph(data, k = 15, symmetrize = True):
graph = kneighbors_graph(data, k)
if symmetrize:
graph = graph + graph.T
graph.data = np.ones(graph.data.shape)
return graph
def normalize_graph(graph, # pylint: disable=missing-function-docstring
normalized,
add_self_loops = True):
if add_self_loops: # Bröther may i have some self-lööps
graph = graph + scipy.sparse.identity(graph.shape[0])
degree = np.squeeze(np.asarray(graph.sum(axis=1)))
if normalized:
with np.errstate(divide='ignore'):
degree = 1. / np.sqrt(degree)
degree[degree == np.inf] = 0
degree = scipy.sparse.diags(degree)
return degree @ graph @ degree
else:
with np.errstate(divide='ignore'):
degree = 1. / degree
degree[degree == np.inf] = 0
degree = scipy.sparse.diags(degree)
return degree @ graph
def scipy_to_tf(matrix):
matrix = matrix.tocoo()
return tf.sparse.SparseTensor(
np.vstack([matrix.row, matrix.col]).T, matrix.data.astype(np.float32),
matrix.shape)
def load_npz_to_sparse_graph(file_name): # pylint: disable=missing-function-docstring
with np.load(open(file_name, 'rb'), allow_pickle=True) as loader:
loader = dict(loader)
adj_matrix = csr_matrix(
(loader['adj_data'], loader['adj_indices'], loader['adj_indptr']),
shape=loader['adj_shape'])
if 'attr_data' in loader:
# Attributes are stored as a sparse CSR matrix
attr_matrix = csr_matrix(
(loader['attr_data'], loader['attr_indices'], loader['attr_indptr']),
shape=loader['attr_shape']).todense()
elif 'attr_matrix' in loader:
# Attributes are stored as a (dense) np.ndarray
attr_matrix = loader['attr_matrix']
else:
raise Exception('No attributes in the data file', file_name)
if 'labels_data' in loader:
# Labels are stored as a CSR matrix
labels = csr_matrix((loader['labels_data'], loader['labels_indices'],
loader['labels_indptr']),
shape=loader['labels_shape'])
label_mask = labels.nonzero()[0]
labels = labels.nonzero()[1]
elif 'labels' in loader:
# Labels are stored as a numpy array
labels = loader['labels']
label_mask = np.ones(labels.shape, dtype=np.bool)
else:
raise Exception('No labels in the data file', file_name)
return adj_matrix, attr_matrix, labels, label_mask
def _parse_index_file(filename):
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def _sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def load_kipf_data(path_str, dataset_str): # pylint: disable=missing-function-docstring
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open('{}/ind.{}.{}'.format(path_str, dataset_str, names[i]),
'rb') as f:
if sys.version_info > (3, 0):
objects.append(pickle.load(f, encoding='latin1'))
else:
objects.append(pickle.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects) # pylint: disable=unbalanced-tuple-unpacking
test_idx_reorder = _parse_index_file('{}/ind.{}.test.index'.format(
path_str, dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(
min(test_idx_reorder),
max(test_idx_reorder) + 1)
tx_extended = lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range - min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range - min(test_idx_range), :] = ty
ty = ty_extended
features = scipy.sparse.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y) + 500)
train_mask = _sample_mask(idx_train, labels.shape[0])
val_mask = _sample_mask(idx_val, labels.shape[0])
test_mask = _sample_mask(idx_test, labels.shape[0])
y_train = np.zeros(labels.shape)
y_val = np.zeros(labels.shape)
y_test = np.zeros(labels.shape)
y_train[train_mask, :] = labels[train_mask, :]
y_val[val_mask, :] = labels[val_mask, :]
y_test[test_mask, :] = labels[test_mask, :]
labels = (y_train + y_val + y_test).nonzero()[1]
label_mask = (y_train + y_val + y_test).nonzero()[0]
return adj, features.todense(), labels, label_mask
| 34.08 | 96 | 0.682596 |
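A short usage sketch for the graph helpers above: build a k-NN graph from random points and normalize it symmetrically. The import path follows the repository layout shown in the metadata; the toy data and parameters are assumptions.

import numpy as np

# Assumed import path, mirroring graph_embedding/dmon/utilities/graph.py.
from graph_embedding.dmon.utilities.graph import construct_knn_graph, normalize_graph

points = np.random.RandomState(0).rand(50, 4)              # toy data: 50 points in 4-D
adjacency = construct_knn_graph(points, k=5)               # symmetrized 0/1 k-NN graph
normalized = normalize_graph(adjacency, normalized=True)   # D^-1/2 (A + I) D^-1/2
print(adjacency.shape, normalized.nnz)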
dfef23651c8de08e952a3336cf1ce12511d4911d
| 15,110 |
py
|
Python
|
pyscf/prop/polarizability/uhf.py
|
pavanell/pyscf
|
c0d19e499685e95dbf4c879539ad3a3ceb6934e2
|
[
"Apache-2.0"
] | 2 |
2019-05-28T05:25:56.000Z
|
2019-11-09T02:16:43.000Z
|
pyscf/prop/polarizability/uhf.py
|
pavanell/pyscf
|
c0d19e499685e95dbf4c879539ad3a3ceb6934e2
|
[
"Apache-2.0"
] | 2 |
2019-09-16T17:58:31.000Z
|
2019-09-22T17:26:01.000Z
|
pyscf/prop/polarizability/uhf.py
|
pavanell/pyscf
|
c0d19e499685e95dbf4c879539ad3a3ceb6934e2
|
[
"Apache-2.0"
] | 2 |
2020-06-01T05:31:38.000Z
|
2022-02-08T02:38:33.000Z
|
#!/usr/bin/env python
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Author: Qiming Sun <[email protected]>
#
'''
Non-relativistic static and dynamic polarizability and hyper-polarizability tensor
(In testing)
'''
import time
from functools import reduce
import numpy
from pyscf import lib
from pyscf.lib import logger
from pyscf.scf import ucphf
from pyscf.soscf.newton_ah import _gen_uhf_response
from pyscf.prop.polarizability import rhf as rhf_polarizability
def dipole(mf):
return mf.dip_moment(mf.mol, mf.make_rdm1())
# Note: polarizability and relevant properties are demanding on basis sets.
# ORCA recommends to use Sadlej basis for these properties.
def polarizability(polobj, with_cphf=True):
from pyscf.prop.nmr import uhf as uhf_nmr
log = logger.new_logger(polobj)
mf = polobj._scf
mol = mf.mol
mo_energy = mf.mo_energy
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mo0a, mo0b = mo_coeff
orboa = mo0a[:, occidxa]
orbva = mo0a[:,~occidxa]
orbob = mo0b[:, occidxb]
orbvb = mo0b[:,~occidxb]
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords) / charges.sum()
with mol.with_common_orig(charge_center):
int_r = mol.intor_symmetric('int1e_r', comp=3)
h1a = lib.einsum('xpq,pi,qj->xij', int_r, mo0a.conj(), orboa)
h1b = lib.einsum('xpq,pi,qj->xij', int_r, mo0b.conj(), orbob)
s1a = numpy.zeros_like(h1a)
s1b = numpy.zeros_like(h1b)
vind = polobj.gen_vind(mf, mo_coeff, mo_occ)
if with_cphf:
mo1 = ucphf.solve(vind, mo_energy, mo_occ, (h1a,h1b), (s1a,s1b),
polobj.max_cycle_cphf, polobj.conv_tol,
verbose=log)[0]
else:
mo1 = uhf_nmr._solve_mo1_uncoupled(mo_energy, mo_occ, (h1a,h1b),
(s1a,s1b))[0]
e2 = numpy.einsum('xpi,ypi->xy', h1a, mo1[0])
e2+= numpy.einsum('xpi,ypi->xy', h1b, mo1[1])
e2 = -(e2 + e2.T)
if mf.verbose >= logger.INFO:
xx, yy, zz = e2.diagonal()
log.note('Isotropic polarizability %.12g', (xx+yy+zz)/3)
log.note('Polarizability anisotropy %.12g',
(.5 * ((xx-yy)**2 + (yy-zz)**2 + (zz-xx)**2))**.5)
log.debug('Static polarizability tensor\n%s', e2)
return e2
def hyper_polarizability(polobj, with_cphf=True):
from pyscf.prop.nmr import uhf as uhf_nmr
log = logger.new_logger(polobj)
mf = polobj._scf
mol = mf.mol
mo_energy = mf.mo_energy
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mo0a, mo0b = mo_coeff
orboa = mo0a[:, occidxa]
orbva = mo0a[:,~occidxa]
orbob = mo0b[:, occidxb]
orbvb = mo0b[:,~occidxb]
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords) / charges.sum()
with mol.with_common_orig(charge_center):
int_r = mol.intor_symmetric('int1e_r', comp=3)
h1a = lib.einsum('xpq,pi,qj->xij', int_r, mo0a.conj(), orboa)
h1b = lib.einsum('xpq,pi,qj->xij', int_r, mo0b.conj(), orbob)
s1a = numpy.zeros_like(h1a)
s1b = numpy.zeros_like(h1b)
vind = polobj.gen_vind(mf, mo_coeff, mo_occ)
if with_cphf:
mo1, e1 = ucphf.solve(vind, mo_energy, mo_occ, (h1a,h1b), (s1a,s1b),
polobj.max_cycle_cphf, polobj.conv_tol, verbose=log)
else:
mo1, e1 = uhf_nmr._solve_mo1_uncoupled(mo_energy, mo_occ, (h1a,h1b),
(s1a,s1b))
mo1a = lib.einsum('xqi,pq->xpi', mo1[0], mo0a)
mo1b = lib.einsum('xqi,pq->xpi', mo1[1], mo0b)
dm1a = lib.einsum('xpi,qi->xpq', mo1a, orboa)
dm1b = lib.einsum('xpi,qi->xpq', mo1b, orbob)
dm1a = dm1a + dm1a.transpose(0,2,1)
dm1b = dm1b + dm1b.transpose(0,2,1)
vresp = _gen_uhf_response(mf, hermi=1)
h1ao = int_r + vresp(numpy.stack((dm1a, dm1b)))
s0 = mf.get_ovlp()
e3 = lib.einsum('xpq,ypi,zqi->xyz', h1ao[0], mo1a, mo1a)
e3 += lib.einsum('xpq,ypi,zqi->xyz', h1ao[1], mo1b, mo1b)
e3 -= lib.einsum('pq,xpi,yqj,zij->xyz', s0, mo1a, mo1a, e1[0])
e3 -= lib.einsum('pq,xpi,yqj,zij->xyz', s0, mo1b, mo1b, e1[1])
e3 = (e3 + e3.transpose(1,2,0) + e3.transpose(2,0,1) +
e3.transpose(0,2,1) + e3.transpose(1,0,2) + e3.transpose(2,1,0))
e3 = -e3
log.debug('Static hyper polarizability tensor\n%s', e3)
return e3
# Solve the frequency-dependent CPHF problem
# [A-wI, B ] [X] + [h1] = [0]
# [B , A+wI] [Y] [h1] [0]
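# A short reading of the notation above (editorial gloss, not from the original
# source): A and B are the usual occupied->virtual orbital-Hessian blocks, w is
# the external field frequency and h1 is the dipole perturbation in the MO
# basis. The solver below stacks the alpha/beta X and Y amplitudes into a
# single Krylov vector in the order (x_alpha, x_beta, y_alpha, y_beta), which
# is exactly the order used by numpy.split with `offsets` inside vind().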
def ucphf_with_freq(mf, mo_energy, mo_occ, h1, freq=0,
max_cycle=20, tol=1e-9, hermi=False, verbose=logger.WARN):
log = logger.new_logger(verbose=verbose)
t0 = (time.clock(), time.time())
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
viridxa = ~occidxa
viridxb = ~occidxb
mo_ea, mo_eb = mo_energy
# e_ai - freq may produce very small elements which can cause numerical
# issue in krylov solver
LEVEL_SHIF = 0.1
e_ai_a = lib.direct_sum('a-i->ai', mo_ea[viridxa], mo_ea[occidxa]).ravel()
e_ai_b = lib.direct_sum('a-i->ai', mo_eb[viridxb], mo_eb[occidxb]).ravel()
diag = (e_ai_a - freq,
e_ai_b - freq,
e_ai_a + freq,
e_ai_b + freq)
diag[0][diag[0] < LEVEL_SHIF] += LEVEL_SHIF
diag[1][diag[1] < LEVEL_SHIF] += LEVEL_SHIF
diag[2][diag[2] < LEVEL_SHIF] += LEVEL_SHIF
diag[3][diag[3] < LEVEL_SHIF] += LEVEL_SHIF
mo0a, mo0b = mf.mo_coeff
nao, nmoa = mo0a.shape
    nmob = mo0b.shape[1]
orbva = mo0a[:,viridxa]
orbvb = mo0b[:,viridxb]
orboa = mo0a[:,occidxa]
orbob = mo0b[:,occidxb]
nvira = orbva.shape[1]
nvirb = orbvb.shape[1]
nocca = orboa.shape[1]
noccb = orbob.shape[1]
h1a = h1[0].reshape(-1,nvira*nocca)
h1b = h1[1].reshape(-1,nvirb*noccb)
ncomp = h1a.shape[0]
mo1base = numpy.hstack((-h1a/diag[0],
-h1b/diag[1],
-h1a/diag[2],
-h1b/diag[3]))
offsets = numpy.cumsum((nocca*nvira, noccb*nvirb, nocca*nvira))
vresp = _gen_uhf_response(mf, hermi=0)
def vind(xys):
nz = len(xys)
dm1a = numpy.empty((nz,nao,nao))
dm1b = numpy.empty((nz,nao,nao))
for i in range(nz):
xa, xb, ya, yb = numpy.split(xys[i], offsets)
dmx = reduce(numpy.dot, (orbva, xa.reshape(nvira,nocca) , orboa.T))
dmy = reduce(numpy.dot, (orboa, ya.reshape(nvira,nocca).T, orbva.T))
dm1a[i] = dmx + dmy # AX + BY
dmx = reduce(numpy.dot, (orbvb, xb.reshape(nvirb,noccb) , orbob.T))
dmy = reduce(numpy.dot, (orbob, yb.reshape(nvirb,noccb).T, orbvb.T))
dm1b[i] = dmx + dmy # AX + BY
v1ao = vresp(numpy.stack((dm1a,dm1b)))
v1voa = lib.einsum('xpq,pi,qj->xij', v1ao[0], orbva, orboa).reshape(nz,-1)
v1vob = lib.einsum('xpq,pi,qj->xij', v1ao[1], orbvb, orbob).reshape(nz,-1)
v1ova = lib.einsum('xpq,pi,qj->xji', v1ao[0], orboa, orbva).reshape(nz,-1)
v1ovb = lib.einsum('xpq,pi,qj->xji', v1ao[1], orbob, orbvb).reshape(nz,-1)
for i in range(nz):
xa, xb, ya, yb = numpy.split(xys[i], offsets)
v1voa[i] += (e_ai_a - freq - diag[0]) * xa
v1voa[i] /= diag[0]
v1vob[i] += (e_ai_b - freq - diag[1]) * xb
v1vob[i] /= diag[1]
v1ova[i] += (e_ai_a + freq - diag[2]) * ya
v1ova[i] /= diag[2]
v1ovb[i] += (e_ai_b + freq - diag[3]) * yb
v1ovb[i] /= diag[3]
v = numpy.hstack((v1voa, v1vob, v1ova, v1ovb))
return v
# FIXME: krylov solver is not accurate enough for many freqs. Using tight
# tol and lindep could offer small help. A better linear equation solver
# is needed.
mo1 = lib.krylov(vind, mo1base, tol=tol, max_cycle=max_cycle,
hermi=hermi, lindep=1e-18, verbose=log)
log.timer('krylov solver in CPHF', *t0)
dm1a = numpy.empty((ncomp,nao,nao))
dm1b = numpy.empty((ncomp,nao,nao))
for i in range(ncomp):
xa, xb, ya, yb = numpy.split(mo1[i], offsets)
dmx = reduce(numpy.dot, (orbva, xa.reshape(nvira,nocca) *2, orboa.T))
dmy = reduce(numpy.dot, (orboa, ya.reshape(nvira,nocca).T*2, orbva.T))
dm1a[i] = dmx + dmy
dmx = reduce(numpy.dot, (orbvb, xb.reshape(nvirb,noccb) *2, orbob.T))
dmy = reduce(numpy.dot, (orbob, yb.reshape(nvirb,noccb).T*2, orbvb.T))
dm1b[i] = dmx + dmy
v1ao = vresp(numpy.stack((dm1a,dm1b)))
mo_e1_a = lib.einsum('xpq,pi,qj->xij', v1ao[0], orboa, orboa)
mo_e1_b = lib.einsum('xpq,pi,qj->xij', v1ao[1], orbob, orbob)
mo_e1 = (mo_e1_a, mo_e1_b)
xa, xb, ya, yb = numpy.split(mo1, offsets, axis=1)
mo1 = (xa.reshape(ncomp,nvira,nocca),
xb.reshape(ncomp,nvirb,noccb),
ya.reshape(ncomp,nvira,nocca),
yb.reshape(ncomp,nvirb,noccb))
return mo1, mo_e1
def polarizability_with_freq(polobj, freq=None):
from pyscf.prop.nmr import rhf as rhf_nmr
log = logger.new_logger(polobj)
mf = polobj._scf
mol = mf.mol
mo_energy = mf.mo_energy
mo_coeff = mf.mo_coeff
mo_occ = mf.mo_occ
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mo0a, mo0b = mo_coeff
orboa = mo0a[:, occidxa]
orbva = mo0a[:,~occidxa]
orbob = mo0b[:, occidxb]
orbvb = mo0b[:,~occidxb]
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords) / charges.sum()
with mol.with_common_orig(charge_center):
int_r = mol.intor_symmetric('int1e_r', comp=3)
h1a = lib.einsum('xpq,pi,qj->xij', int_r, orbva.conj(), orboa)
h1b = lib.einsum('xpq,pi,qj->xij', int_r, orbvb.conj(), orbob)
mo1 = ucphf_with_freq(mf, mo_energy, mo_occ, (h1a,h1b), freq,
polobj.max_cycle_cphf, polobj.conv_tol,
verbose=log)[0]
# *-1 from the definition of dipole moment.
e2 = -numpy.einsum('xpi,ypi->xy', h1a, mo1[0])
e2 -= numpy.einsum('xpi,ypi->xy', h1b, mo1[1])
e2 -= numpy.einsum('xpi,ypi->xy', h1a, mo1[2])
e2 -= numpy.einsum('xpi,ypi->xy', h1b, mo1[3])
log.debug('Polarizability tensor with freq %s', freq)
log.debug('%s', e2)
return e2
class Polarizability(lib.StreamObject):
def __init__(self, mf):
mol = mf.mol
self.mol = mol
self.verbose = mol.verbose
self.stdout = mol.stdout
self._scf = mf
self.cphf = True
self.max_cycle_cphf = 20
self.conv_tol = 1e-9
self._keys = set(self.__dict__.keys())
def gen_vind(self, mf, mo_coeff, mo_occ):
'''Induced potential'''
vresp = _gen_uhf_response(mf, hermi=1)
occidxa = mo_occ[0] > 0
occidxb = mo_occ[1] > 0
mo0a, mo0b = mo_coeff
orboa = mo0a[:, occidxa]
orbob = mo0b[:, occidxb]
nocca = orboa.shape[1]
noccb = orbob.shape[1]
nmoa = mo0a.shape[1]
nmob = mo0b.shape[1]
def vind(mo1):
mo1 = mo1.reshape(-1,nmoa*nocca+nmob*noccb)
mo1a = mo1[:,:nmoa*nocca].reshape(-1,nmoa,nocca)
mo1b = mo1[:,nmoa*nocca:].reshape(-1,nmob,noccb)
dm1a = lib.einsum('xai,pa,qi->xpq', mo1a, mo0a, orboa.conj())
dm1b = lib.einsum('xai,pa,qi->xpq', mo1b, mo0b, orbob.conj())
dm1a = dm1a + dm1a.transpose(0,2,1).conj()
dm1b = dm1b + dm1b.transpose(0,2,1).conj()
v1ao = vresp(numpy.stack((dm1a,dm1b)))
v1a = lib.einsum('xpq,pi,qj->xij', v1ao[0], mo0a.conj(), orboa)
v1b = lib.einsum('xpq,pi,qj->xij', v1ao[1], mo0b.conj(), orbob)
v1mo = numpy.hstack((v1a.reshape(-1,nmoa*nocca),
v1b.reshape(-1,nmob*noccb)))
return v1mo.ravel()
return vind
polarizability = polarizability
polarizability_with_freq = polarizability_with_freq
hyper_polarizability = hyper_polarizability
from pyscf import scf
scf.uhf.UHF.Polarizability = lib.class_as_method(Polarizability)
if __name__ == '__main__':
from pyscf import gto
from pyscf import scf
mol = gto.Mole()
# Disagreement between analytical results and finite difference found for
# linear molecule
#mol.atom = '''h , 0. 0. 0.
# F , 0. 0. .917'''
mol.atom='''O 0. 0. 0.
H 0. -0.757 0.587
H 0. 0.757 0.587'''
mol.spin = 2
mol.basis = '631g'
mol.build()
mf = scf.UHF(mol).run(conv_tol=1e-14)
polar = mf.Polarizability().polarizability()
hpol = mf.Polarizability().hyper_polarizability()
print(polar)
mf.verbose = 0
charges = mol.atom_charges()
coords = mol.atom_coords()
charge_center = numpy.einsum('i,ix->x', charges, coords) / charges.sum()
with mol.with_common_orig(charge_center):
ao_dip = mol.intor_symmetric('int1e_r', comp=3)
h1 = mf.get_hcore()
def apply_E(E):
mf.get_hcore = lambda *args, **kwargs: h1 + numpy.einsum('x,xij->ij', E, ao_dip)
mf.run(conv_tol=1e-14)
return mf.dip_moment(mol, mf.make_rdm1(), unit='AU', verbose=0)
e1 = apply_E([ 0.0001, 0, 0])
e2 = apply_E([-0.0001, 0, 0])
print((e1 - e2) / 0.0002)
e1 = apply_E([0, 0.0001, 0])
e2 = apply_E([0,-0.0001, 0])
print((e1 - e2) / 0.0002)
e1 = apply_E([0, 0, 0.0001])
e2 = apply_E([0, 0,-0.0001])
print((e1 - e2) / 0.0002)
# Small discrepancy found between analytical derivatives and finite
# differences
print(hpol)
def apply_E(E):
mf.get_hcore = lambda *args, **kwargs: h1 + numpy.einsum('x,xij->ij', E, ao_dip)
mf.run(conv_tol=1e-14)
return Polarizability(mf).polarizability()
e1 = apply_E([ 0.0001, 0, 0])
e2 = apply_E([-0.0001, 0, 0])
print((e1 - e2) / 0.0002)
e1 = apply_E([0, 0.0001, 0])
e2 = apply_E([0,-0.0001, 0])
print((e1 - e2) / 0.0002)
e1 = apply_E([0, 0, 0.0001])
e2 = apply_E([0, 0,-0.0001])
print((e1 - e2) / 0.0002)
print(Polarizability(mf).polarizability())
print(Polarizability(mf).polarizability_with_freq(freq= 0.))
print(Polarizability(mf).polarizability_with_freq(freq= 0.1))
print(Polarizability(mf).polarizability_with_freq(freq=-0.1))
| 36.674757 | 88 | 0.59325 |
3e410612bdd682001e01183ffbb9915e5bcb609c
| 4,357 |
py
|
Python
|
functions/notify_slack.py
|
kabisa/terraform-aws-notify-slack
|
560695f55f0a0e4d61934eca2ee9cd371b50f124
|
[
"Apache-2.0"
] | null | null | null |
functions/notify_slack.py
|
kabisa/terraform-aws-notify-slack
|
560695f55f0a0e4d61934eca2ee9cd371b50f124
|
[
"Apache-2.0"
] | null | null | null |
functions/notify_slack.py
|
kabisa/terraform-aws-notify-slack
|
560695f55f0a0e4d61934eca2ee9cd371b50f124
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import os, boto3, json, base64
import urllib.request, urllib.parse
import logging
# Decrypt encrypted URL with KMS
def decrypt(encrypted_url):
region = os.environ['AWS_REGION']
try:
kms = boto3.client('kms', region_name=region)
plaintext = kms.decrypt(CiphertextBlob=base64.b64decode(encrypted_url))['Plaintext']
return plaintext.decode()
except Exception:
logging.exception("Failed to decrypt URL with KMS")
# Send a message to a slack channel
def notify_slack(subject, message, region):
slack_url = os.environ['SLACK_WEBHOOK_URL']
if not slack_url.startswith("http"):
slack_url = decrypt(slack_url)
slack_channels = os.environ['SLACK_CHANNELS'].replace(' ', '').split(",")
slack_username = os.environ['SLACK_USERNAME']
slack_emoji = os.environ['SLACK_EMOJI']
for slack_channel in slack_channels:
payload = {
"channel": slack_channel,
"username": slack_username,
"icon_emoji": slack_emoji,
"attachments": []
}
if type(message) is str:
try:
message = json.loads(message);
except json.JSONDecodeError as err:
logging.exception(f'JSON decode error: {err}')
payload = format_message(payload, subject, message, region);
data = urllib.parse.urlencode({"payload": json.dumps(payload)}).encode("utf-8")
req = urllib.request.Request(slack_url)
urllib.request.urlopen(req, data)
def lambda_handler(event, context):
subject = event['Records'][0]['Sns']['Subject']
message = event['Records'][0]['Sns']['Message']
region = event['Records'][0]['Sns']['TopicArn'].split(":")[3]
notify_slack(subject, message, region)
return message
def format_message(payload, subject, message, region):
if "AlarmName" in message:
#cloudwatch notification
return cloudwatch_notification(payload, message, region);
else:
return json_to_table_notification(payload, subject, message);
def cloudwatch_notification(payload, message, region):
states = {'OK': 'good', 'INSUFFICIENT_DATA': 'warning', 'ALARM': 'danger'}
attachments = {
"color": states[message['NewStateValue']],
"fallback": "Alarm {} triggered".format(message['AlarmName']),
"footer": "AWS SNS Notification",
"footer_icon": "https://www.kabisa.nl/favicon-f61d5679.png",
"fields": [
{ "title": "Alarm Name", "value": message['AlarmName'], "short": True },
{ "title": "Alarm Description", "value": message['AlarmDescription'], "short": False},
{ "title": "Alarm reason", "value": message['NewStateReason'], "short": False},
{ "title": "Old State", "value": message['OldStateValue'], "short": True },
{ "title": "Current State", "value": message['NewStateValue'], "short": True },
{
"title": "Link to Alarm",
"value": "https://console.aws.amazon.com/cloudwatch/home?region=" + region + "#alarm:alarmFilter=ANY;name=" + urllib.parse.quote_plus(message['AlarmName']),
"short": False
}
]
}
payload['text'] = attachments["fallback"];
payload['attachments'].append(attachments);
return payload;
def json_to_table_notification(payload, subject, message):
fields = [];
for key, value in message.items():
if isinstance(value, str) and len(value) > 30:
fields.append({"title":key, "value": value, "short": False});
else:
fields.append({"title":key, "value": value, "short": True});
attachments = {
"fallback": "A new message",
"fields": fields,
"footer": "AWS SNS Notification",
"footer_icon": "https://www.kabisa.nl/favicon-f61d5679.png"
}
payload['text'] = subject;
payload['attachments'].append(attachments);
return payload;
#notify_slack({"AlarmName":"Example","AlarmDescription":"Example alarm description.","AWSAccountId":"000000000000","NewStateValue":"ALARM","NewStateReason":"Threshold Crossed","StateChangeTime":"2017-01-12T16:30:42.236+0000","Region":"EU - Ireland","OldStateValue":"OK"}, "eu-west-1")
| 37.560345 | 284 | 0.616479 |
9e699202db58b6fd48063e0da33a56d017f3cd4b
| 3,759 |
py
|
Python
|
google/ads/googleads/v4/services/services/hotel_performance_view_service/transports/base.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/services/services/hotel_performance_view_service/transports/base.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
google/ads/googleads/v4/services/services/hotel_performance_view_service/transports/base.py
|
batardo/google-ads-python
|
a39748521847e85138fca593f3be2681352ad024
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.ads.googleads.v4.resources.types import hotel_performance_view
from google.ads.googleads.v4.services.types import (
hotel_performance_view_service,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-ads-googleads",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class HotelPerformanceViewServiceTransport(metaclass=abc.ABCMeta):
"""Abstract transport class for HotelPerformanceViewService."""
AUTH_SCOPES = ("https://www.googleapis.com/auth/adwords",)
def __init__(
self,
*,
host: str = "googleads.googleapis.com",
credentials: credentials.Credentials = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiate the transport.
Args:
host (Optional[str]): The hostname to connect to.
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
"""
# Save the hostname. Default to port 443 (HTTPS) if none is specified.
if ":" not in host:
host += ":443"
self._host = host
# If no credentials are provided, then determine the appropriate
# defaults.
if credentials is None:
credentials, _ = auth.default(scopes=self.AUTH_SCOPES)
# Save the credentials.
self._credentials = credentials
# Lifted into its own function so it can be stubbed out during tests.
self._prep_wrapped_messages(client_info)
def _prep_wrapped_messages(self, client_info):
# Precomputed wrapped methods
self._wrapped_methods = {
self.get_hotel_performance_view: gapic_v1.method.wrap_method(
self.get_hotel_performance_view,
default_timeout=None,
client_info=client_info,
),
}
@property
def get_hotel_performance_view(
self,
) -> typing.Callable[
[hotel_performance_view_service.GetHotelPerformanceViewRequest],
hotel_performance_view.HotelPerformanceView,
]:
raise NotImplementedError
__all__ = ("HotelPerformanceViewServiceTransport",)
| 35.130841 | 78 | 0.681564 |
f6f0978e29133d31324834ede837c8aa68771d8f
| 1,184 |
py
|
Python
|
textless/data/collater_utils.py
|
an918tw/textlesslib
|
d9fcccefbd76b5d6dc6f1df0b8c743e730038f1f
|
[
"MIT"
] | 198 |
2022-02-14T21:48:11.000Z
|
2022-03-31T22:49:30.000Z
|
textless/data/collater_utils.py
|
an918tw/textlesslib
|
d9fcccefbd76b5d6dc6f1df0b8c743e730038f1f
|
[
"MIT"
] | 2 |
2022-03-07T16:52:30.000Z
|
2022-03-17T01:12:47.000Z
|
textless/data/collater_utils.py
|
an918tw/textlesslib
|
d9fcccefbd76b5d6dc6f1df0b8c743e730038f1f
|
[
"MIT"
] | 9 |
2022-02-16T09:43:04.000Z
|
2022-03-31T23:55:43.000Z
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
def collate_tensors(stream, pad):
"""
>>> tensors = [torch.tensor(x) for x in [[1,2,3], [1]]]
>>> pad = 0
>>> collate_tensors(tensors, pad)
tensor([[1, 2, 3],
[1, 0, 0]])
"""
assert len(stream) > 0
length = max(v.size(0) for v in stream)
n_samples = len(stream)
collated = stream[0].new_full((n_samples, length), pad)
for i, v in enumerate(stream):
collated[i, : v.size(0)] = v
return collated
def wrap_bos_eos(units, durations, f0, dense, bos, eos):
assert units.size(0) == durations.size(0) == dense.size(0)
if f0 is not None:
assert units.size(0) == f0.size(0)
units = torch.cat([bos, units, eos])
z = torch.zeros_like(durations[0:1])
durations = torch.cat([z, durations, z])
if f0 is not None:
z = torch.zeros_like(f0[0:1])
f0 = torch.cat([z, f0, z])
z = torch.zeros_like(dense[0:1, :])
dense = torch.cat([z, dense, z])
return units, durations, f0, dense
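# A shape-level usage sketch (editorial addition, not from the original file):
# the unit IDs, BOS/EOS symbols and the dense-feature width below are made-up
# values chosen only to satisfy the size assertions in wrap_bos_eos.
if __name__ == "__main__":
    units = torch.tensor([11, 12, 13])
    durations = torch.tensor([2, 1, 3])
    dense = torch.zeros(3, 4)
    bos, eos = torch.tensor([1]), torch.tensor([2])
    u, d, f0, x = wrap_bos_eos(units, durations, None, dense, bos, eos)
    print(u.shape, d.shape, x.shape)  # every sequence is now two tokens longer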
| 25.73913 | 65 | 0.599662 |
74a16987f6f7e48405d780e1b5cd78b0d1176988
| 5,174 |
py
|
Python
|
lib/matplotlib/testing/jpl_units/StrConverter.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 35 |
2015-10-23T08:15:36.000Z
|
2022-02-03T10:17:15.000Z
|
lib/matplotlib/testing/jpl_units/StrConverter.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 3 |
2015-09-17T16:27:45.000Z
|
2018-07-31T05:59:33.000Z
|
lib/matplotlib/testing/jpl_units/StrConverter.py
|
pierre-haessig/matplotlib
|
0d945044ca3fbf98cad55912584ef80911f330c6
|
[
"MIT",
"PSF-2.0",
"BSD-3-Clause"
] | 25 |
2016-01-18T12:19:11.000Z
|
2021-12-11T15:45:17.000Z
|
#===========================================================================
#
# StrConverter
#
#===========================================================================
"""StrConverter module containing class StrConverter."""
#===========================================================================
# Place all imports after here.
#
from __future__ import print_function
import matplotlib.units as units
from matplotlib.cbook import iterable
# Place all imports before here.
#===========================================================================
__all__ = [ 'StrConverter' ]
#===========================================================================
class StrConverter( units.ConversionInterface ):
""": A matplotlib converter class. Provides matplotlib conversion
functionality for string data values.
Valid units for string are:
- 'indexed' : Values are indexed as they are specified for plotting.
- 'sorted' : Values are sorted alphanumerically.
- 'inverted' : Values are inverted so that the first value is on top.
- 'sorted-inverted' : A combination of 'sorted' and 'inverted'
"""
#------------------------------------------------------------------------
@staticmethod
def axisinfo( unit, axis ):
""": Returns information on how to handle an axis that has string data.
= INPUT VARIABLES
- axis The axis using this converter.
- unit The units to use for a axis with string data.
= RETURN VALUE
- Returns a matplotlib AxisInfo data structure that contains
minor/major formatters, major/minor locators, and default
label information.
"""
return None
#------------------------------------------------------------------------
@staticmethod
def convert( value, unit, axis ):
""": Convert value using unit to a float. If value is a sequence, return
the converted sequence.
= INPUT VARIABLES
- axis The axis using this converter.
- value The value or list of values that need to be converted.
- unit The units to use for a axis with Epoch data.
= RETURN VALUE
- Returns the value parameter converted to floats.
"""
if ( units.ConversionInterface.is_numlike( value ) ):
return value
if ( value == [] ):
return []
# we delay loading to make matplotlib happy
ax = axis.axes
if axis is ax.get_xaxis():
isXAxis = True
else:
isXAxis = False
axis.get_major_ticks()
ticks = axis.get_ticklocs()
labels = axis.get_ticklabels()
labels = [ l.get_text() for l in labels if l.get_text() ]
if ( not labels ):
ticks = []
labels = []
if ( not iterable( value ) ):
value = [ value ]
newValues = []
for v in value:
if ( (v not in labels) and (v not in newValues) ):
newValues.append( v )
for v in newValues:
if ( labels ):
labels.append( v )
else:
labels = [ v ]
#DISABLED: This is disabled because matplotlib bar plots do not
#DISABLED: recalculate the unit conversion of the data values
#DISABLED: this is due to design and is not really a bug.
#DISABLED: If this gets changed, then we can activate the following
#DISABLED: block of code. Note that this works for line plots.
#DISABLED if ( unit ):
#DISABLED if ( unit.find( "sorted" ) > -1 ):
#DISABLED labels.sort()
#DISABLED if ( unit.find( "inverted" ) > -1 ):
#DISABLED labels = labels[ ::-1 ]
# add padding (so they do not appear on the axes themselves)
labels = [ '' ] + labels + [ '' ]
ticks = range( len(labels) )
ticks[0] = 0.5
ticks[-1] = ticks[-1] - 0.5
axis.set_ticks( ticks )
axis.set_ticklabels( labels )
# we have to do the following lines to make ax.autoscale_view work
loc = axis.get_major_locator()
loc.set_bounds( ticks[0], ticks[-1] )
if ( isXAxis ):
ax.set_xlim( ticks[0], ticks[-1] )
else:
ax.set_ylim( ticks[0], ticks[-1] )
result = []
for v in value:
# If v is not in labels then something went wrong with adding new
# labels to the list of old labels.
errmsg = "This is due to a logic error in the StrConverter class. "
errmsg += "Please report this error and its message in bugzilla."
assert ( v in labels ), errmsg
result.append( ticks[ labels.index(v) ] )
ax.viewLim.ignore(-1)
return result
#------------------------------------------------------------------------
@staticmethod
def default_units( value, axis ):
""": Return the default unit for value, or None.
= INPUT VARIABLES
- axis The axis using this converter.
- value The value or list of values that need units.
= RETURN VALUE
- Returns the default units to use for value.
Return the default unit for value, or None.
"""
# The default behavior for string indexing.
return "indexed"
| 32.136646 | 79 | 0.536336 |
225758df209fdc21d3d7b5373f0662b169f6ecec
| 3,111 |
py
|
Python
|
data_utils.py
|
BloomBabe/Underwater-Enhancing
|
6154cd11c402fdc3f353dee9dc7c4166a1f36751
|
[
"MIT"
] | 2 |
2021-02-18T04:10:31.000Z
|
2021-03-04T05:27:58.000Z
|
data_utils.py
|
BloomBabe/Underwater-Enhancing
|
6154cd11c402fdc3f353dee9dc7c4166a1f36751
|
[
"MIT"
] | null | null | null |
data_utils.py
|
BloomBabe/Underwater-Enhancing
|
6154cd11c402fdc3f353dee9dc7c4166a1f36751
|
[
"MIT"
] | 1 |
2021-03-04T05:27:59.000Z
|
2021-03-04T05:27:59.000Z
|
import numpy as np
import torch
import random
from skimage import io, transform
import torch.nn.functional as F
from torchvision import transforms
torch.manual_seed(17)
random.seed(42)
class Resize(object):
"""Rescale the image in a sample to a given size.
Args:
output_size (tuple or int): Desired output size. If tuple, output is
matched to output_size. If int, smaller of image edges is matched
to output_size keeping aspect ratio the same.
"""
def __init__(self, output_size):
assert isinstance(output_size, (int, tuple))
self.output_size = output_size
def _resize(self, image):
h, w = image.size()[1:3]
if isinstance(self.output_size, int):
if h > w:
new_h, new_w = self.output_size * h / w, self.output_size
else:
new_h, new_w = self.output_size, self.output_size * w / h
else:
new_h, new_w = self.output_size
new_h, new_w = int(new_h), int(new_w)
img = F.interpolate(image.unsqueeze(0), (new_h, new_w))
return img.squeeze(0)
def __call__(self, sample):
raw_image, ref_image = sample['raw_image'], sample['ref_image']
new_raw_image = self._resize(raw_image)
new_ref_image = self._resize(ref_image)
return {'raw_image': new_raw_image, 'ref_image': new_ref_image}
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def _transpose(self, image, channels=(2, 0, 1)):
return image.transpose(channels)
def __call__(self, sample):
raw_image, ref_image = sample['raw_image'], sample['ref_image']
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
new_raw_image = self._transpose(raw_image)
new_ref_image = self._transpose(ref_image)
return {'raw_image': torch.from_numpy(new_raw_image).float(),
'ref_image': torch.from_numpy(new_ref_image).float()}
class Normalize(object):
"""Normalize a tensor image with mean and standard deviation."""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def _normalize(self, image):
return transforms.Normalize(self.mean, self.std)(image)
def __call__(self, sample):
raw_image, ref_image = sample['raw_image'], sample['ref_image']
norm_raw_image = self._normalize(raw_image)
norm_ref_image = self._normalize(ref_image)
return {'raw_image': norm_raw_image,
'ref_image': norm_ref_image}
class RandomRotation(object):
"""Rotate the image by angle."""
def _rotate(self, image, angle):
return transforms.functional.rotate(image, angle)
def __call__(self, sample):
raw_image, ref_image = sample['raw_image'], sample['ref_image']
angle = random.randint(0, 360)
rotate_raw_image = self._rotate(raw_image, angle)
rotate_ref_image = self._rotate(ref_image, angle)
return {'raw_image': rotate_raw_image,
'ref_image': rotate_ref_image}
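# A composition sketch (editorial addition): chaining the transforms above with
# torchvision's Compose on a dummy sample; the image size and the mean/std
# values are placeholders, not values taken from the original project.
if __name__ == "__main__":
    pipeline = transforms.Compose([
        ToTensor(),
        Resize(256),
        Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),
        RandomRotation(),
    ])
    sample = {
        'raw_image': np.random.rand(320, 480, 3),
        'ref_image': np.random.rand(320, 480, 3),
    }
    out = pipeline(sample)
    print(out['raw_image'].shape, out['ref_image'].shape)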
| 33.815217 | 77 | 0.643202 |
3c89e384fcde93261eb1c4457aea9ecd50c9324b
| 6,771 |
py
|
Python
|
create-toc.py
|
the-codeslinger/music-management-scripts
|
a35d568e5fc550d5444db59448844980a44e1cbc
|
[
"Apache-2.0"
] | 1 |
2019-07-06T08:07:28.000Z
|
2019-07-06T08:07:28.000Z
|
create-toc.py
|
the-codeslinger/music-management-scripts
|
a35d568e5fc550d5444db59448844980a44e1cbc
|
[
"Apache-2.0"
] | null | null | null |
create-toc.py
|
the-codeslinger/music-management-scripts
|
a35d568e5fc550d5444db59448844980a44e1cbc
|
[
"Apache-2.0"
] | null | null | null |
# Scan directories for audio files with a given extension and read the
# meta data from the file's name to create a table of contents JSON
# file from that information. This file can later be used by
# conversion tools/scripts.
#
# Usage example:
#   python3 create-toc.py -c etc/create-toc.json
#
# The JSON configuration file supplies the source directory ("source"), the
# tag delimiter ("delim"), the order of the tags encoded in each file name
# ("format"), the audio file extension ("type") and whether to recurse into
# sub-directories ("recurse").
#
# See `python3 create-toc.py --help` for details.
import os
import json
import codecs
import argparse
from pathvalidate import sanitize_filename
ARTIST_TAG_NAME = "artist"
ALBUM_TAG_NAME = "album"
GENRE_TAG_NAME = "genre"
YEAR_TAG_NAME = "year"
TRACK_TAG_NAME = "track"
TITLE_TAG_NAME = "title"
TRACK_LIST_NAME = "tracks"
FILENAME_TAG_NAME = "filename"
LONG_FILENAME_TAG_NAME = "long"
SHORT_FILENAME_TAG_NAME = "short"
FORWARD_SLASH_STRING = "/"
FORWARD_SLASH_CODE = "&47;"
COLON_STRING = ":"
COLON_CODE = "&58;"
QUESTION_MARK_STRING = "?"
QUESTION_MARK_CODE = "&63;"
BACKSLASH_STRING = "\\"
BACKSLASH_CODE = "&92;"
HASH_STRING = "#"
HASH_CODE = "&35;"
TOC_FILENAME = "ToC.json"
def is_hidden(name):
return name[0] == "."
def get_artist(file_tags):
return file_tags[ARTIST_TAG_NAME] if ARTIST_TAG_NAME in file_tags else ""
def get_album(file_tags):
return file_tags[ALBUM_TAG_NAME] if ALBUM_TAG_NAME in file_tags else ""
def get_genre(file_tags):
return file_tags[GENRE_TAG_NAME] if GENRE_TAG_NAME in file_tags else ""
def get_year(file_tags):
return file_tags[YEAR_TAG_NAME] if YEAR_TAG_NAME in file_tags else ""
def get_track(file_tags):
return file_tags[TRACK_TAG_NAME].zfill(2) if TRACK_TAG_NAME in file_tags else ""
def get_title(file_tags):
return file_tags[TITLE_TAG_NAME] if TITLE_TAG_NAME in file_tags else ""
def replace_specials(value):
return value \
.replace(FORWARD_SLASH_CODE, FORWARD_SLASH_STRING) \
.replace(COLON_CODE, COLON_STRING) \
.replace(QUESTION_MARK_CODE, QUESTION_MARK_STRING) \
.replace(BACKSLASH_CODE, BACKSLASH_STRING) \
.replace(HASH_CODE, HASH_STRING)
def simple_filename(file_tags, type):
track = get_track(file_tags)
title = get_title(file_tags)
filename = ""
if track and title:
filename = track + " - " + title + "." + type
elif track and not title:
filename = track + "." + type
elif not track and title:
filename = title + "." + type
# Sanitize_filename removes my coded special characters.
filename = filename \
.replace("\"", "") \
.replace(",", "") \
.replace("!", "") \
.replace(FORWARD_SLASH_STRING, "") \
.replace(COLON_STRING, "") \
.replace(QUESTION_MARK_STRING, "") \
.replace(BACKSLASH_STRING, "") \
.replace(HASH_STRING, "")
return sanitize_filename(filename)
def assert_and_fill_metadata(record_metadata, tag_name, tag_value):
if not record_metadata[tag_name]:
record_metadata[tag_name] = tag_value
else:
assert record_metadata[tag_name] == tag_value, f"File contains different {tag_name}"
def fill_record_metadata(record_metadata, file_tags):
assert_and_fill_metadata(record_metadata, ARTIST_TAG_NAME, get_artist(file_tags))
assert_and_fill_metadata(record_metadata, ALBUM_TAG_NAME, get_album(file_tags))
assert_and_fill_metadata(record_metadata, GENRE_TAG_NAME, get_genre(file_tags))
assert_and_fill_metadata(record_metadata, YEAR_TAG_NAME, get_year(file_tags))
def remove_redundant_tags(file_tags):
file_tags.pop(ARTIST_TAG_NAME, None)
file_tags.pop(ALBUM_TAG_NAME, None)
file_tags.pop(GENRE_TAG_NAME, None)
file_tags.pop(YEAR_TAG_NAME, None)
def read_tags(filename, config):
format_list = config["format"]
tag_list = filename.split(config["delim"])
assert len(tag_list) <= len(format_list), f"Number tags in file {filename} larger than expected according to format"
file_tags = {}
for index, value in enumerate(tag_list):
tag_name = format_list[index]
file_tags[tag_name] = replace_specials(value)
return file_tags
def write_toc_file(dir, record_metadata):
with codecs.open(os.path.join(dir, TOC_FILENAME), "w", encoding="UTF-8") as json_file:
json.dump(record_metadata, json_file, indent=2, ensure_ascii=False)
def rename_files(dir, record_metadata):
for track_info in record_metadata[TRACK_LIST_NAME]:
long_name = track_info[FILENAME_TAG_NAME][LONG_FILENAME_TAG_NAME]
short_name = track_info[FILENAME_TAG_NAME][SHORT_FILENAME_TAG_NAME]
os.rename(os.path.join(dir, long_name), os.path.join(dir, short_name))
def read_dir(subdir, config):
print(subdir)
if os.path.exists(os.path.join(subdir, TOC_FILENAME)):
print(f"Folder {subdir} already contains ToC")
return
with os.scandir(subdir) as iter:
record_metadata = {
ARTIST_TAG_NAME: "",
ALBUM_TAG_NAME: "",
GENRE_TAG_NAME: "",
YEAR_TAG_NAME: "",
TRACK_LIST_NAME: []
}
for entry in iter:
type = config["type"]
if entry.is_file() and entry.name.endswith("." + type):
file = entry.name
file_no_ext = file[:-1 * (1 + len(type))]
file_tags = read_tags(file_no_ext, config)
file_tags[FILENAME_TAG_NAME] = {
LONG_FILENAME_TAG_NAME: entry.name,
SHORT_FILENAME_TAG_NAME: simple_filename(file_tags, type)
}
fill_record_metadata(record_metadata, file_tags)
remove_redundant_tags(file_tags)
record_metadata[TRACK_LIST_NAME].append(file_tags)
if record_metadata[TRACK_LIST_NAME]:
write_toc_file(subdir, record_metadata)
rename_files(subdir, record_metadata)
def read_recursive(config):
for subdir, _, _ in os.walk(config["source"]):
read_dir(subdir, config)
def read_config(config_path):
with codecs.open(config_path, "r", encoding="UTF-8") as f:
return json.load(f)
def make_abs_config_path(config):
config_path = config
if not os.path.isabs(config):
config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), config)
return config_path
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"-c", "--config",
help="""Optional configuration file. Defaults to {script-dir}/etc/create-toc.json""",
default="etc/create-toc.json")
return parser.parse_args()
args = parse_args()
config = read_config(make_abs_config_path(args.config))
if config["recurse"] is True:
read_recursive(config)
else:
read_dir(config["source"], config)
| 32.552885 | 120 | 0.678482 |
1084e40fa035a9f1746cd3192dd7b12b238dae76
| 5,035 |
py
|
Python
|
alert_service_sdk/model/inspection/val_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | 5 |
2019-07-31T04:11:05.000Z
|
2021-01-07T03:23:20.000Z
|
alert_service_sdk/model/inspection/val_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
alert_service_sdk/model/inspection/val_pb2.py
|
easyopsapis/easyops-api-python
|
adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: val.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from alert_service_sdk.model.inspection import condition_pb2 as alert__service__sdk_dot_model_dot_inspection_dot_condition__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='val.proto',
package='inspection',
syntax='proto3',
serialized_options=_b('ZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspection'),
serialized_pb=_b('\n\tval.proto\x12\ninspection\x1a\x32\x61lert_service_sdk/model/inspection/condition.proto\"\x98\x01\n\rInspectionVal\x12\n\n\x02id\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\x12\x0c\n\x04memo\x18\x03 \x01(\t\x12\x0c\n\x04type\x18\x04 \x01(\t\x12\x0c\n\x04unit\x18\x05 \x01(\t\x12\x0e\n\x06weight\x18\x06 \x01(\x05\x12\x33\n\nconditions\x18\x07 \x03(\x0b\x32\x1f.inspection.InspectionConditionBFZDgo.easyops.local/contracts/protorepo-models/easyops/model/inspectionb\x06proto3')
,
dependencies=[alert__service__sdk_dot_model_dot_inspection_dot_condition__pb2.DESCRIPTOR,])
_INSPECTIONVAL = _descriptor.Descriptor(
name='InspectionVal',
full_name='inspection.InspectionVal',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='id', full_name='inspection.InspectionVal.id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='name', full_name='inspection.InspectionVal.name', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memo', full_name='inspection.InspectionVal.memo', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='type', full_name='inspection.InspectionVal.type', index=3,
number=4, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='unit', full_name='inspection.InspectionVal.unit', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight', full_name='inspection.InspectionVal.weight', index=5,
number=6, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='conditions', full_name='inspection.InspectionVal.conditions', index=6,
number=7, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=78,
serialized_end=230,
)
_INSPECTIONVAL.fields_by_name['conditions'].message_type = alert__service__sdk_dot_model_dot_inspection_dot_condition__pb2._INSPECTIONCONDITION
DESCRIPTOR.message_types_by_name['InspectionVal'] = _INSPECTIONVAL
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
InspectionVal = _reflection.GeneratedProtocolMessageType('InspectionVal', (_message.Message,), {
'DESCRIPTOR' : _INSPECTIONVAL,
'__module__' : 'val_pb2'
# @@protoc_insertion_point(class_scope:inspection.InspectionVal)
})
_sym_db.RegisterMessage(InspectionVal)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| 43.034188 | 505 | 0.753923 |
56474489db784a9641bcbf3b40ee07c8c756e390
| 550 |
py
|
Python
|
cnlp_annotator/task_center/task_center_webapi/manage.py
|
szj2ys/cnlp_annotator
|
1837d952a73ffe97b0e5c3523d51896e92572ce1
|
[
"Apache-2.0"
] | 915 |
2018-07-25T07:30:27.000Z
|
2022-03-25T14:09:17.000Z
|
cnlp_annotator/task_center/task_center_webapi/manage.py
|
szj2ys/cnlp_annotator
|
1837d952a73ffe97b0e5c3523d51896e92572ce1
|
[
"Apache-2.0"
] | 20 |
2018-10-12T15:48:56.000Z
|
2021-09-27T09:12:01.000Z
|
cnlp_annotator/task_center/task_center_webapi/manage.py
|
szj2ys/cnlp_annotator
|
1837d952a73ffe97b0e5c3523d51896e92572ce1
|
[
"Apache-2.0"
] | 204 |
2018-07-30T06:52:29.000Z
|
2022-03-03T15:18:39.000Z
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "task_center_webapi.settings")
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| 34.375 | 82 | 0.692727 |
177a19b9c71cf4c467d6c933abacbd9faf1d7259
| 4,086 |
py
|
Python
|
polydomino/app.py
|
PsiACE/polydomino
|
ade7cdb303cb4073d8c075659a5494392d31f8b4
|
[
"MIT"
] | null | null | null |
polydomino/app.py
|
PsiACE/polydomino
|
ade7cdb303cb4073d8c075659a5494392d31f8b4
|
[
"MIT"
] | null | null | null |
polydomino/app.py
|
PsiACE/polydomino
|
ade7cdb303cb4073d8c075659a5494392d31f8b4
|
[
"MIT"
] | null | null | null |
import os
import sys
import time
from datetime import timedelta
import cv2
from dotenv import find_dotenv, load_dotenv
from flask import Flask, jsonify, render_template, request
from werkzeug.utils import secure_filename
from colordescriptor import ColorDescriptor
from searcher import Searcher
load_dotenv(find_dotenv())
INDEX_PATH = os.environ.get("INDEX_PATH")
FEATURES = os.environ.get("FEATURES")
SEARCHER = os.environ.get("SEARCHER")
# Allowed upload file extensions
ALLOWED_EXTENSIONS = set(["png", "jpg", "JPG", "PNG", "bmp"])
def allowed_file(filename):
return "." in filename and filename.rsplit(".", 1)[1] in ALLOWED_EXTENSIONS
# create flask instance
app = Flask(__name__)
app.jinja_env.filters["zip"] = zip
INDEX = os.path.join(os.path.dirname(__file__), INDEX_PATH)
# main route
@app.route("/")
def index():
return render_template("index.html")
@app.route("/demo")
def demo():
return render_template("demo.html")
@app.route("/query", methods=["GET", "POST"])
def query():
if request.method == "POST":
f = request.files["file"]
if not (f and allowed_file(f.filename)):
return jsonify({"error": 1001, "msg": "请检查上传的图片类型,仅限于png、PNG、jpg、JPG、bmp"})
        basepath = os.path.dirname(__file__)  # directory containing this file
        # Note: the destination folder must exist beforehand, otherwise saving fails because the path is missing
upload_path = os.path.join(
basepath, "static/queries", secure_filename(f.filename)
)
        # upload_path = os.path.join(basepath, 'static/images','test.jpg')  # Note: the folder must exist beforehand
f.save(upload_path)
        # Re-read and re-write the upload with OpenCV to normalise its format and file name (cv2 is already imported at module level)
img = cv2.imread(upload_path)
cv2.imwrite(os.path.join(basepath, "static/queries", "test.jpg"), img)
RESULTS_ARRAY = []
SCORE_ARRAY = []
cd = ColorDescriptor((8, 12, 3))
features = get_features(cd, FEATURES, img)
searcher = Searcher(INDEX)
results = searcher.search(features, SEARCHER)
# loop over the results, displaying the score and image name
for (score, resultID) in results:
RESULTS_ARRAY.append(resultID)
SCORE_ARRAY.append(score)
return render_template(
"query_ok.html",
results=(RESULTS_ARRAY[:5]),
scores=(SCORE_ARRAY[:5]),
name=f.filename,
)
return render_template("query.html")
# search route
@app.route("/search", methods=["POST"])
def search():
if request.method == "POST":
RESULTS_ARRAY = []
# get url
image_url = request.form.get("img")
try:
# initialize the image descriptor
cd = ColorDescriptor((8, 12, 3))
image_url = "polydomino/" + image_url[1:]
query = cv2.imread(image_url)
features = get_features(cd, FEATURES, query)
# perform the search
searcher = Searcher(INDEX)
results = searcher.search(features, SEARCHER)
# loop over the results, displaying the score and image name
for (score, resultID) in results:
RESULTS_ARRAY.append({"image": str(resultID), "score": str(score)})
# return success
return jsonify(results=(RESULTS_ARRAY[:5]))
except:
# return error
jsonify({"sorry": "Sorry, no results! Please try again."}), 500
def get_features(cd, method, query):
if method == "color-moments":
return cd.color_moments(query)
elif method == "hsv-describe":
return cd.hsv_describe(query)
elif method == "gray-matrix":
return cd.gray_matrix(query)
elif method == "humoments":
return cd.humoments(query)
elif method == "ahash":
return cd.ahash(query)
elif method == "phash":
return cd.phash(query)
elif method == "mse":
return cd.mse(query)
elif method == "dhash":
return cd.dhash(query)
elif method == "hog":
return cd.hog(query)
else:
return
# run!
if __name__ == "__main__":
app.run("127.0.0.1", debug=True)
| 26.36129 | 103 | 0.615027 |
ddb8fec7f1bc312c6beebb7a5ed95a8e6fabcb44
| 3,126 |
py
|
Python
|
app/routes/flush.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | null | null | null |
app/routes/flush.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | null | null | null |
app/routes/flush.py
|
petechd/eq-questionnaire-runner
|
1c5b182a7f8bc878cfdd767ae080410fa679abd6
|
[
"MIT"
] | null | null | null |
from flask import Blueprint, Response, current_app, request, session
from sdc.crypto.decrypter import decrypt
from sdc.crypto.encrypter import encrypt
from structlog import get_logger
from app.authentication.user import User
from app.globals import get_answer_store, get_metadata, get_questionnaire_store
from app.keys import KEY_PURPOSE_AUTHENTICATION, KEY_PURPOSE_SUBMISSION
from app.questionnaire.router import Router
from app.submitter.converter import convert_answers
from app.submitter.submission_failed import SubmissionFailedException
from app.utilities.json import json_dumps
from app.utilities.schema import load_schema_from_metadata
flush_blueprint = Blueprint("flush", __name__)
logger = get_logger()
@flush_blueprint.route("/flush", methods=["POST"])
def flush_data():
if session:
session.clear()
encrypted_token = request.args.get("token")
    if not encrypted_token:
return Response(status=403)
decrypted_token = decrypt(
token=encrypted_token,
key_store=current_app.eq["key_store"],
key_purpose=KEY_PURPOSE_AUTHENTICATION,
leeway=current_app.config["EQ_JWT_LEEWAY_IN_SECONDS"],
)
roles = decrypted_token.get("roles")
if roles and "flusher" in roles:
user = _get_user(decrypted_token["response_id"])
metadata = get_metadata(user)
if "tx_id" in metadata:
logger.bind(tx_id=metadata["tx_id"])
if _submit_data(user):
return Response(status=200)
return Response(status=404)
return Response(status=403)
def _submit_data(user):
answer_store = get_answer_store(user)
if answer_store:
questionnaire_store = get_questionnaire_store(user.user_id, user.user_ik)
answer_store = questionnaire_store.answer_store
metadata = questionnaire_store.metadata
progress_store = questionnaire_store.progress_store
list_store = questionnaire_store.list_store
schema = load_schema_from_metadata(metadata)
router = Router(schema, answer_store, list_store, progress_store, metadata)
full_routing_path = router.full_routing_path()
message = json_dumps(
convert_answers(
schema, questionnaire_store, full_routing_path, flushed=True
)
)
encrypted_message = encrypt(
message, current_app.eq["key_store"], KEY_PURPOSE_SUBMISSION
)
sent = current_app.eq["submitter"].send_message(
encrypted_message,
tx_id=metadata.get("tx_id"),
case_id=metadata["case_id"],
)
if not sent:
raise SubmissionFailedException()
get_questionnaire_store(user.user_id, user.user_ik).delete()
logger.info("successfully flushed answers")
return True
logger.info("no answers found to flush")
return False
def _get_user(response_id):
id_generator = current_app.eq["id_generator"]
user_id = id_generator.generate_id(response_id)
user_ik = id_generator.generate_ik(response_id)
return User(user_id, user_ik)
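# How the endpoint is exercised (editorial note): the route above only accepts
# POST requests, expects the encrypted token as a query parameter, and lets the
# request through only when the decrypted "roles" claim contains "flusher".
# A hypothetical invocation therefore looks like:
#
#   curl -X POST "https://<runner-host>/flush?token=<encrypted JWT>"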
| 32.226804 | 83 | 0.712412 |
c952e3e103322bf46aa65d8578978aed5398af2f
| 104 |
py
|
Python
|
invent-your-own-computer-games-with-python/hello.py
|
learning-game-development/learning-python-game-development
|
326b72eadab0bfb14f70f295b492f76d139dde33
|
[
"Unlicense"
] | null | null | null |
invent-your-own-computer-games-with-python/hello.py
|
learning-game-development/learning-python-game-development
|
326b72eadab0bfb14f70f295b492f76d139dde33
|
[
"Unlicense"
] | null | null | null |
invent-your-own-computer-games-with-python/hello.py
|
learning-game-development/learning-python-game-development
|
326b72eadab0bfb14f70f295b492f76d139dde33
|
[
"Unlicense"
] | null | null | null |
print("Hello world!")
print("What is your name?")
name = input()
print("It is good to meet you,", name)
| 20.8 | 38 | 0.663462 |