blob_id stringlengths 40-40 | directory_id stringlengths 40-40 | path stringlengths 3-616 | content_id stringlengths 40-40 | detected_licenses listlengths 0-112 | license_type stringclasses 2 values | repo_name stringlengths 5-115 | snapshot_id stringlengths 40-40 | revision_id stringlengths 40-40 | branch_name stringclasses 777 values | visit_date timestamp[us] 2015-08-06 10:31:46 to 2023-09-06 10:44:38 | revision_date timestamp[us] 1970-01-01 02:38:32 to 2037-05-03 13:00:00 | committer_date timestamp[us] 1970-01-01 02:38:32 to 2023-09-06 01:08:06 | github_id int64 4.92k to 681M | star_events_count int64 0 to 209k | fork_events_count int64 0 to 110k | gha_license_id stringclasses 22 values | gha_event_created_at timestamp[us] 2012-06-04 01:52:49 to 2023-09-14 21:59:50 | gha_created_at timestamp[us] 2008-05-22 07:58:19 to 2023-08-21 12:35:19 | gha_language stringclasses 149 values | src_encoding stringclasses 26 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 3 to 10.2M | extension stringclasses 188 values | content stringlengths 3 to 10.2M | authors listlengths 1-1 | author_id stringlengths 1-132
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
bb6fb54135f828268b32553a317043acc288650b | a2ab6c23253badb3be54b19ba061e1aeaac6a8cd | /utils/image_annotator.py | 6d3168d8b386e8ae48a956d41739e56d99f89255 | [] | no_license | vivek09pathak/ImageDetection_RealTime | 0720fb4a6f35a81591f401a04ae44aa3bbea013f | d9e376b41a1216aecaacc9626cee59d45001695c | refs/heads/master | 2022-12-26T22:04:18.328476 | 2020-09-30T10:20:15 | 2020-09-30T10:20:15 | 152,729,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,380 | py | import cv2
import numpy as np
from data.videos import path as videos_path
def from_image(image):
try:
grid_interval = 25
grid_color = (200, 100, 200)
points = []
current = [0, 0]
width = image.shape[1]
height = image.shape[0]
img = image.copy()
c_x = int(width / 2)
c_y = int(height / 2)
for i in range(0, c_x + 1, grid_interval):
cv2.line(img, (i, 0), (i, height), grid_color, 1)
cv2.line(img, (width - i, 0), (width - i, height), grid_color, 1)
for i in range(0, c_y + 1, grid_interval):
cv2.line(img, (0, i), (width, i), grid_color, 1)
cv2.line(img, (0, height - i), (width, height - i), grid_color, 1)
def select_point(event, x, y, flags, param):
current[0] = x
current[1] = y
if event == cv2.EVENT_LBUTTONDBLCLK:
points.append([x, y])
winname = 'window1'
print(winname)
cv2.namedWindow(winname)
cv2.imshow(winname, image)
cv2.resizeWindow(winname, 200, 200)
cv2.setMouseCallback(winname, select_point)
cv2.moveWindow(winname, 0, 0)
while True:
temp_img = img.copy()
cv2.putText(temp_img, str(current), (current[0] + 20, current[1]), cv2.FONT_HERSHEY_PLAIN, 0.5,
(255, 255, 255), 1)
for point in points:
cv2.circle(temp_img, (point[0], point[1]), 1, (255, 0, 0), -1)
cv2.imshow(winname, temp_img)
k = cv2.waitKey(20) & 0xFF
if k == 8:
try:
points.pop()
except:
pass
if k == 27:
break
print("Here!!!")
roi = np.float32(np.array(points.copy()))
mark = 0.47 * width
temp_img = image.copy()
cv2.polylines(temp_img, [np.int32(roi)], 1, (0, 255, 0), 3)
cv2.imshow(winname, temp_img)
cv2.waitKey(0)
roi = roi.tolist()
if roi:
return roi
while(True):
k = cv2.waitKey(0)
except:
pass
if __name__ == '__main__':
cap = cv2.VideoCapture(videos_path.get()+'/ra_rafee_cabin_1.mp4')
ret = False
while not ret:
ret, frame = cap.read()
print(from_image(frame)) | [
"[email protected]"
] | |
ab2e4af17f8e39c556b8394fc307067c0fcf635b | da0d673da16f92ffed008b4c8b8c82c336d78122 | /server/app.py | 90039f57aee1197bfae1a0fe21bfe9586aac1f61 | [] | no_license | aparkalov/sc-web | 5dcac607c42376df205c6a025dbfe076f018970b | 43c0d79f0fefa435bb9f53b230e9b9048e000613 | refs/heads/master | 2021-01-17T18:21:58.247615 | 2015-10-25T20:45:11 | 2015-10-25T20:45:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,155 | py | import tornado.ioloop
import tornado.web
import tornado.options
import secret
import os
from handlers.main import MainHandler
import handlers.api as api
import handlers.auth as auth
import admin.main as admin
import admin.users as admin_users
import ws, db
is_closing = False
def signal_handler(signum, frame):
global is_closing
is_closing = True
def try_exit():
global is_closing
if is_closing:
# clean up here
tornado.ioloop.IOLoop.instance().stop()
class NoCacheStaticHandler(tornado.web.StaticFileHandler):
""" Request static file handlers for development and debug only.
It disables any caching for static file.
"""
def set_extra_headers(self, path):
self.set_header('Cache-Control', 'no-store, no-cache, must-revalidate, max-age=0')
def main():
tornado.options.define("static_path", default = "../client/static", help = "path to static files directory", type = str)
tornado.options.define("templates_path", default = "../client/templates", help = "path to template files directory", type = str)
tornado.options.define("sctp_port", default = 55770, help = "port of sctp server", type = int)
tornado.options.define("sctp_host", default = "localhost", help = "host of sctp server", type = str)
tornado.options.define("event_wait_timeout", default = 10, help = "time to wait commands processing", type = int)
tornado.options.define("idtf_serach_limit", default = 30, help = "number of maximum results for searching by identifier", type = int)
tornado.options.define("redis_host", default = "localhost", help = "host of redis server", type = str)
tornado.options.define("redis_port", default = 6379, help = "port of redis server", type = int)
tornado.options.define("redis_db_idtf", default = 0, help = "number of redis database to store identifiers", type = int)
tornado.options.define("redis_db_user", default = 1, help = "number of redis database to store user info", type = int)
tornado.options.define("host", default = "localhost", help = "host name", type = str)
tornado.options.define("port", default = 8000, help = "host port", type = int)
tornado.options.define("google_client_id", default = "", help = "client id for google auth", type = str)
tornado.options.define("google_client_secret", default = "", help = "client secret for google auth", type = str)
tornado.options.define("user_key_expire_time", default = 600, help = "user key expire time in seconds", type = int)
tornado.options.define("super_emails", default = "", help = "email of site super administrator (maximum rights)", type = list)
tornado.options.define("db_path", default = "data.db", help = "path to database file", type = str)
tornado.options.define("cfg", default = "server.conf", help = "path to configuration file", type = str)
tornado.options.parse_command_line()
if os.path.exists(tornado.options.options.cfg):
tornado.options.parse_config_file(tornado.options.options.cfg)
# prepare database
database = db.DataBase()
database.init()
rules = [
(r"/", MainHandler),
(r"/static/(.*)", NoCacheStaticHandler, {"path": tornado.options.options.static_path}),
# api
(r"/api/init/", api.Init),
(r"/api/context/", api.ContextMenu),
(r"/api/cmd/do/", api.CmdDo),
(r"/api/question/answer/translate/", api.QuestionAnswerTranslate),
(r"/api/link/content/", api.LinkContent),
(r"/api/link/format/", api.LinkFormat),
(r"/api/languages/", api.Languages),
(r"/api/languages/set/", api.LanguageSet),
(r"/api/idtf/find/", api.IdtfFind),
(r"/api/idtf/resolve/", api.IdtfResolve),
(r"/api/addr/resolve/", api.AddrResolve),
(r"/api/info/tooltip/", api.InfoTooltip),
(r"/api/user/", api.User),
(r"/auth/google$", auth.GoogleOAuth2LoginHandler),
(r"/auth/logout$", auth.LogOut),
(r"/admin$", admin.MainHandler),
(r"/admin/users/get$", admin_users.UsersInfo),
(r"/admin/users/set_rights$", admin_users.UserSetRights),
(r"/admin/users/list_rights$", admin_users.UserListRights),
(r"/sctp", ws.SocketHandler),
]
application = tornado.web.Application(
handlers = rules,
cookie_secret = secret.get_secret(),
login_url = "/auth/google",
template_path = tornado.options.options.templates_path,
xsrf_cookies = False,
gzip = True,
google_oauth = {"key": tornado.options.options.google_client_id,
"secret": tornado.options.options.google_client_secret
}
)
application.listen(tornado.options.options.port)
tornado.ioloop.PeriodicCallback(try_exit, 1000).start()
tornado.ioloop.IOLoop.instance().start()
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
efdf17b0386a062ae1786386cfcd575b6dfe973d | 397e125e94f4f139f2bf5055824d81f24b8b1757 | /ABC/165/F-1.py | e46b77a108de4a46bd7d12686ca0056a7ddfe1e2 | [] | no_license | tails1434/Atcoder | ecbab6ee238e3f225551297db961b1b502841fa4 | e7c7fed36be46bbaaf020a70997842240ba98d62 | refs/heads/master | 2021-07-07T00:31:49.235625 | 2020-09-30T01:42:01 | 2020-09-30T01:42:01 | 189,009,622 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,035 | py | import sys
import bisect
from collections import deque
sys.setrecursionlimit(4100000)
input = sys.stdin.readline
def main():
N = int(input())
A = list(map(int, input().split()))
edge = [[] for _ in range(N)]
for i in range(N-1):
u, v = map(int, input().split())
u -= 1
v -= 1
edge[u].append(v)
edge[v].append(u)
ans = [0] * (N + 1)
LIS = [-1]
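# DFS keeps the longest increasing subsequence of the values on the current
# root-to-node path; every change to LIS is rolled back on return, so sibling
# subtrees start again from the parent's state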
def dfs(v, p = N):
if A[v] > LIS[-1]:
LIS.append(A[v])
ans[v] = ans[p] + 1
for u in edge[v]:
if u == p:
continue
dfs(u,v)
LIS.pop()
else:
ans[v] = ans[p]
idx = bisect.bisect_left(LIS, A[v])
old = LIS[idx]
LIS[idx] = A[v]
for u in edge[v]:
if u == p:
continue
dfs(u,v)
LIS[idx] = old
dfs(0)
for i in range(N):
print(ans[i])
if __name__ == "__main__":
main() | [
"[email protected]"
] | |
e5298e2cb42519ec6fdfc02dc68398406969417c | cc096d321ab5c6abf54fdcea67f10e77cd02dfde | /flex-backend/pypy/translator/js/function.py | c14b8c8c19d81579e42f69e8c29f1ca539832eaa | [
"LicenseRef-scancode-unknown-license-reference",
"MIT"
] | permissive | limweb/flex-pypy | 310bd8fcd6a9ddc01c0b14a92f0298d0ae3aabd2 | 05aeeda183babdac80f9c10fca41e3fb1a272ccb | refs/heads/master | 2021-01-19T22:10:56.654997 | 2008-03-19T23:51:59 | 2008-03-19T23:51:59 | 32,463,309 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,649 | py | try:
set
except NameError:
from sets import Set as set
from pypy.objspace.flow import model as flowmodel
from pypy.rpython.lltypesystem.lltype import Signed, Unsigned, Void, Bool, Float
from pypy.rpython.lltypesystem.lltype import SignedLongLong, UnsignedLongLong
from pypy.rpython.ootypesystem import ootype
from pypy.translator.oosupport.metavm import Generator,InstructionList
from pypy.translator.oosupport import function
from pypy.translator.js.log import log
from types import FunctionType
import re
class BaseGenerator(object):
def load(self, v):
if isinstance(v, flowmodel.Variable):
if v.name in self.argset:
selftype, selfname = self.args[0]
if self.is_method and v.name == selfname:
self.ilasm.load_self()
else:
self.ilasm.load_arg(v)
else:
self.ilasm.load_local(v)
elif isinstance(v, flowmodel.Constant):
self.db.load_const(v.concretetype, v.value, self.ilasm)
elif isinstance(v, str):
self.ilasm.load_const("'" + v + "'")
else:
assert False
def store(self, v):
assert isinstance(v, flowmodel.Variable)
if v.concretetype is not Void:
self.ilasm.store_local(v)
else:
self.ilasm.store_void()
def change_name(self, name, to_name):
self.ilasm.change_name(name, to_name)
def add_comment(self, text):
pass
def function_signature(self, graph):
return self.cts.graph_to_signature(graph, False)
def class_name(self, ooinstance):
return ooinstance._name
def emit(self, instr, *args):
self.ilasm.emit(instr, *args)
def call_graph(self, graph):
self.db.pending_function(graph)
func_sig = self.function_signature(graph)
self.ilasm.call(func_sig)
def call_external(self, name, args):
self.ilasm.call((name, args))
#def call_signature(self, signature):
# self.ilasm.call(signature)
def cast_to(self, lltype):
cts_type = self.cts.lltype_to_cts(lltype, False)
self.ilasm.castclass(cts_type)
def new(self, obj):
self.ilasm.new(self.cts.obj_name(obj))
def set_field(self, obj, name):
self.ilasm.set_field(obj, name)
#self.ilasm.set_field(self.field_name(obj,name))
def get_field(self, useless_stuff, name):
self.ilasm.get_field(name)
def call_method(self, obj, name):
func_name, signature = self.cts.method_signature(obj, name)
self.ilasm.call_method(obj, name, signature)
def call_external_method(self, name, arg_len):
self.ilasm.call_method(None, name, [0]*arg_len)
def instantiate(self):
self.ilasm.runtimenew()
def downcast(self, TYPE):
pass
def load_special(self, v):
# special case for loading value
# when setting builtin field we need to load function instead of None
# FIXME: we cheat here
if isinstance(v, flowmodel.Constant) and v.concretetype is ootype.Void and isinstance(v.value, FunctionType):
graph = self.db.translator.annotator.bookkeeper.getdesc(v.value).cachedgraph(None)
self.db.pending_function(graph)
name = graph.name
self.ilasm.load_str(name)
else:
self.load(v)
def cast_function(self, name, num):
self.ilasm.cast_function(name, num)
def prefix_op(self, st):
self.ilasm.prefix_op(st)
def load_str(self, s):
self.ilasm.load_str(s)
def load_void(self):
self.ilasm.load_void()
def list_setitem(self, base_obj, item, val):
self.load(base_obj)
self.load(val)
self.load(item)
self.ilasm.list_setitem()
def list_getitem(self, base_obj, item):
self.load(base_obj)
self.load(item)
self.ilasm.list_getitem()
def push_primitive_constant(self, TYPE, value):
self.db.load_const(TYPE, value, self.ilasm)
def branch_unconditionally(self, target_label):
self.ilasm.jump_block(self.block_map[target_label])
def branch_conditionally(self, exitcase, target_label):
self.ilasm.branch_if(exitcase)
self.ilasm.jump_block(self.block_map[target_label])
self.ilasm.close_branch()
class Function(function.Function, BaseGenerator):
def __init__(self, db, graph, name=None, is_method=False,
is_entrypoint=False, _class=None):
self._class = _class
super(Function, self).__init__(db, graph, name, is_method, is_entrypoint)
self._set_args()
self._set_locals()
self.order = 0
self.name = name or self.db.get_uniquename(self.graph, self.graph.name)
def _setup_link(self, link, is_exc_link = False):
target = link.target
for to_load, to_store in zip(link.args, target.inputargs):
if to_load.concretetype is not Void:
if is_exc_link and isinstance(to_load, flowmodel.Variable) and re.match("last_exc_value", to_load.name):
self.ilasm.load_str("exc")
else:
self.load(to_load)
self.store(to_store)
def _create_generator(self, ilasm):
return self
def begin_render(self):
block_map = {}
for blocknum, block in enumerate(self.graph.iterblocks()):
block_map[self._get_block_name(block)] = blocknum
self.block_map = block_map
if self.is_method:
args = self.args[1:] # self is implicit
else:
args = self.args
if self.is_method:
self.ilasm.begin_method(self.name, self._class, [i[1] for i in args])
else:
self.ilasm.begin_function(self.name, args)
self.ilasm.set_locals(",".join([i[1] for i in self.locals]))
self.ilasm.begin_for()
def render_return_block(self, block):
return_var = block.inputargs[0]
if return_var.concretetype is not Void:
self.load(return_var)
self.ilasm.ret()
else:
self.ilasm.load_void()
self.ilasm.ret()
def end_render(self):
self.ilasm.end_for()
self.ilasm.end_function()
def render_raise_block(self, block):
self.ilasm.throw(block.inputargs[1])
def end_try(self, target_label):
self.ilasm.jump_block(self.block_map[target_label])
self.ilasm.catch()
#self.ilasm.close_branch()
def record_ll_meta_exc(self, ll_meta_exc):
pass
def begin_catch(self, llexitcase):
real_name = self.cts.lltype_to_cts(llexitcase._inst.class_._INSTANCE)
s = "isinstanceof(exc, %s)"%real_name
self.ilasm.branch_if_string(s)
def end_catch(self, target_label):
""" Ends the catch block, and branchs to the given target_label as the
last item in the catch block """
self.ilasm.close_branch()
def store_exception_and_link(self, link):
self._setup_link(link, True)
self.ilasm.jump_block(self.block_map[self._get_block_name(link.target)])
def after_except_block(self):
#self.ilasm.close_branch()
self.ilasm.throw_real("exc")
self.ilasm.close_branch()
def set_label(self, label):
self.ilasm.write_case(self.block_map[label])
#self.ilasm.label(label)
def begin_try(self):
self.ilasm.begin_try()
def clean_stack(self):
self.ilasm.clean_stack()
| [
"lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d"
] | lucio.torre@dbd81ab4-9648-0410-a770-9b81666e587d |
02c88c50e679434e3e4ae163ee0e5026f6e74efc | e489172f6e49e1239db56c047a78a29a6ffc0b36 | /via_purchase_enhancements/stock.py | 94ac503ae599e50b3f088a456a70e97b5fede72d | [] | no_license | eksotama/prln-via-custom-addons | f05d0059353ae1de89ccc8d1625a896c0215cfc7 | f2b44a8af0e7bee87d52d258fca012bf44ca876f | refs/heads/master | 2020-03-25T19:49:08.117628 | 2015-12-01T07:29:43 | 2015-12-01T07:29:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,321 | py | ##############################################################################
#
# Vikasa Infinity Anugrah, PT
# Copyright (c) 2011 - 2012 Vikasa Infinity Anugrah <http://www.infi-nity.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
from osv import osv, fields
class stock_move(osv.osv):
_inherit = 'stock.move'
_columns = {
'po_line_desc':fields.related('purchase_line_id', 'name', type="char", string="PO Description", readonly=True),
'po_line_note':fields.related('purchase_line_id', 'notes', type="text", string="PO Notes", readonly=True),
}
stock_move()
| [
"aero@aero.(none)"
] | aero@aero.(none) |
680addf26dba5b848a8d83cd34e73d8f679a6b41 | b67ba573498318c906968bd2c946543dbd4658fe | /gravityspytools/retrain_model/views.py | 479c7bb11b8c958af3bc4fd322cdb30c88d2d2d7 | [
"BSD-3-Clause"
] | permissive | Gravity-Spy/gravityspytools | 1f86f91a00063afdfe507f1d1bf38be5c8e1b421 | 23ef83e36ed934f7c39440bf43f4d5c7b7b4abb0 | refs/heads/master | 2021-05-09T08:55:12.904449 | 2020-07-14T18:38:25 | 2020-07-14T18:38:25 | 119,413,494 | 4 | 4 | BSD-3-Clause | 2020-07-14T18:38:27 | 2018-01-29T17:05:08 | Python | UTF-8 | Python | false | false | 2,405 | py | # -*- coding: utf-8 -*-
#from __future__ import unicode_literals
from django.shortcuts import render, redirect
from login.utils import make_authorization_url
from collection_to_subjectset.utils import retrieve_subjects_from_collection
from .forms import NewClassForm
from .models import NewClass
from gwpy.table import EventTable
def index(request):
if request.user.is_authenticated:
form = NewClassForm()
return render(request, 'retrain-model-form.html', {'form': form})
else:
return redirect(make_authorization_url())
def retrain_model(request):
# if this is a POST request we need to process the form data
if request.method == 'POST':
# create a form instance and populate it with data from the request:
form = NewClassForm(request.POST)
# check whether it's valid:
if form.is_valid():
collection_owner = str(form.cleaned_data['collection_owner'])
collection_name = str(form.cleaned_data['collection_name'])
new_class_name = str(form.cleaned_data['new_class_name'])
# First determine the subjects attempting to be added to the training set
subjects_in_collection, tmp = retrieve_subjects_from_collection(collection_owner, collection_name)
subjects_in_collection = [str(isubject) for isubject in subjects_in_collection]
new_subjects = list(EventTable.fetch('gravityspy',
'glitches WHERE CAST(links_subjects AS FLOAT) IN ({0})'.format(str(",".join(subjects_in_collection))),
columns=["gravityspy_id"], host='gravityspyplus.ciera.northwestern.edu')['gravityspy_id'])
requested_model, created = NewClass.objects.get_or_create(collection_owner=collection_owner,
collection_name=collection_name,
new_class_name=new_class_name,
new_subjects=new_subjects,
user=request.user)
requested_model.save()
return render(request, 'temp.html')
else:
return render(request, 'retrain-model-form.html', {'form': form})
| [
"[email protected]"
] | |
f6dfeaf665aeacee6e8e8506307ea7e21cea454e | 747f759311d404af31c0f80029e88098193f6269 | /addons/purchase_confirm/__init__.py | dff9e4121dca62a7713e16faec859bfcefc3e483 | [] | no_license | sgeerish/sirr_production | 9b0d0f7804a928c0c582ddb4ccb7fcc084469a18 | 1081f3a5ff8864a31b2dcd89406fac076a908e78 | refs/heads/master | 2020-05-19T07:21:37.047958 | 2013-09-15T13:03:36 | 2013-09-15T13:03:36 | 9,648,444 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 66 | py | /home/openerp/production/extra-addons/purchase_confirm/__init__.py | [
"[email protected]"
] | |
5d17e513b427415520b3fd591509b1b5542e8fb0 | 556db265723b0cc30ad2917442ed6dad92fd9044 | /tensorflow/lite/tutorials/dataset.py | fdaf84c2bb43e306fe0bf9c9172c996cdcefe1c6 | [
"MIT",
"Apache-2.0",
"BSD-2-Clause"
] | permissive | graphcore/tensorflow | c1669b489be0e045b3ec856b311b3139858de196 | 085b20a4b6287eff8c0b792425d52422ab8cbab3 | refs/heads/r2.6/sdk-release-3.2 | 2023-07-06T06:23:53.857743 | 2023-03-14T13:04:04 | 2023-03-14T13:48:43 | 162,717,602 | 84 | 17 | Apache-2.0 | 2023-03-25T01:13:37 | 2018-12-21T13:30:38 | C++ | UTF-8 | Python | false | false | 4,299 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""tf.data.Dataset interface to the MNIST dataset.
This is cloned from
https://github.com/tensorflow/models/blob/master/official/r1/mnist/dataset.py
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gzip
import os
import shutil
import tempfile
import numpy as np
from six.moves import urllib
import tensorflow as tf
def read32(bytestream):
"""Read 4 bytes from bytestream as an unsigned 32-bit integer."""
dt = np.dtype(np.uint32).newbyteorder('>')
return np.frombuffer(bytestream.read(4), dtype=dt)[0]
def check_image_file_header(filename):
"""Validate that filename corresponds to images for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_images, unused
rows = read32(f)
cols = read32(f)
if magic != 2051:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
if rows != 28 or cols != 28:
raise ValueError(
'Invalid MNIST file %s: Expected 28x28 images, found %dx%d' %
(f.name, rows, cols))
def check_labels_file_header(filename):
"""Validate that filename corresponds to labels for the MNIST dataset."""
with tf.gfile.Open(filename, 'rb') as f:
magic = read32(f)
read32(f) # num_items, unused
if magic != 2049:
raise ValueError('Invalid magic number %d in MNIST file %s' % (magic,
f.name))
def download(directory, filename):
"""Download (and unzip) a file from the MNIST dataset if not already done."""
filepath = os.path.join(directory, filename)
if tf.gfile.Exists(filepath):
return filepath
if not tf.gfile.Exists(directory):
tf.gfile.MakeDirs(directory)
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
url = 'https://storage.googleapis.com/cvdf-datasets/mnist/' + filename + '.gz'
_, zipped_filepath = tempfile.mkstemp(suffix='.gz')
print('Downloading %s to %s' % (url, zipped_filepath))
urllib.request.urlretrieve(url, zipped_filepath)
with gzip.open(zipped_filepath, 'rb') as f_in, \
tf.gfile.Open(filepath, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
os.remove(zipped_filepath)
return filepath
def dataset(directory, images_file, labels_file):
"""Download and parse MNIST dataset."""
images_file = download(directory, images_file)
labels_file = download(directory, labels_file)
check_image_file_header(images_file)
check_labels_file_header(labels_file)
def decode_image(image):
# Normalize from [0, 255] to [0.0, 1.0]
image = tf.decode_raw(image, tf.uint8)
image = tf.cast(image, tf.float32)
image = tf.reshape(image, [784])
return image / 255.0
def decode_label(label):
label = tf.decode_raw(label, tf.uint8) # tf.string -> [tf.uint8]
label = tf.reshape(label, []) # label is a scalar
return tf.to_int32(label)
images = tf.data.FixedLengthRecordDataset(
images_file, 28 * 28, header_bytes=16).map(decode_image)
labels = tf.data.FixedLengthRecordDataset(
labels_file, 1, header_bytes=8).map(decode_label)
return tf.data.Dataset.zip((images, labels))
def train(directory):
"""tf.data.Dataset object for MNIST training data."""
return dataset(directory, 'train-images-idx3-ubyte',
'train-labels-idx1-ubyte')
def test(directory):
"""tf.data.Dataset object for MNIST test data."""
return dataset(directory, 't10k-images-idx3-ubyte', 't10k-labels-idx1-ubyte')
| [
"[email protected]"
] | |
27175455b13f7f9029aeb649eab51391f2080bbf | 6be8aa517e679b33b47d35f100e6590902a8a1db | /Math/Problem05.py | bd5e99bc0820fd1c4d6b55798e43ebf0508f2baa | [] | no_license | LeeJuhae/Algorithm-Python | 7ca4762712e5e84d1e277abecb3bf39c9cbd4e56 | 729947b4428205adfbac194a5527b0eeafe1c525 | refs/heads/master | 2023-04-24T01:02:36.430970 | 2021-05-23T07:17:25 | 2021-05-23T07:17:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,075 | py | # https://www.acmicpc.net/problem/1722
import sys
read = sys.stdin.readline
n = int(read().strip())
cmd = list(map(int, read().strip().split()))
cmd[0] -= 1
fact = [-1 for _ in range(21)]
fact[0] = 1
for i in range(1, 21):
fact[i] = fact[i - 1] * i
def go(d, ans):
global k, arr, fact
if d == n:
print(" ".join(map(str, ans)))
sys.exit()
c = len(arr)
cnt = fact[c - 1]
for i in range(c):
if k <= (i + 1) * cnt:
ans.append(arr[i])
arr.pop(i)
k -= i * cnt
go(d + 1, ans)
def binary(arr, target):
l, r = 0, len(arr)
while l < r:
mid = (l + r) // 2
if arr[mid] < target:
l = mid + 1
else:
r = mid
return r
if cmd[0]:
# find the rank (position) of the given permutation
ret = 0
arr = list(range(1, n + 1))
for e in cmd[1:]:
n -= 1
idx = binary(arr, e)
arr.pop(idx)
ret += fact[n] * idx
print(ret + 1)
else:
# find the k-th permutation
k = cmd[1]
arr = list(range(1, n + 1))
go(0, [])
| [
"[email protected]"
] | |
0acfaee2a19dd42351019acacada350f2ece6f31 | fbe2c3b1feb69a5ce019c805594a49dc11c7e463 | /astrality/tests/module/module_manager/test_persisting_created_files.py | c7cf4305b01e387d33b1c69a69fdc1345d8fd310 | [
"MIT"
] | permissive | JakobGM/astrality | 50630a26ef6428a0c1376269d71ddaa52912f374 | 72935b616f9a6a2e9254e9cd9319b525c596e8f0 | refs/heads/master | 2023-01-07T20:26:05.925893 | 2019-11-19T10:15:36 | 2019-11-19T10:15:36 | 117,895,437 | 114 | 7 | MIT | 2022-12-26T20:49:19 | 2018-01-17T21:34:33 | Python | UTF-8 | Python | false | false | 2,518 | py | """Tests for ensuring that all files that are created are persisted."""
import pytest
from astrality.module import ModuleManager
from astrality.persistence import CreatedFiles
@pytest.mark.parametrize('method', ['compile', 'copy', 'symlink'])
def test_that_created_files_are_persisted(method, create_temp_files):
"""When modules create files, they should be persisted."""
(
template1,
template2,
template3,
target1,
target2,
target3,
) = create_temp_files(6)
# Delete targets to prevent backups from being restored
for target in (target1, target2, target3):
target.unlink()
modules = {
'A': {
method: [
{
'content': str(template1),
'target': str(target1),
},
{
'content': str(template2),
'target': str(target2),
},
],
},
'B': {
method: {
'content': str(template3),
'target': str(target3),
},
},
}
module_manager = ModuleManager(modules=modules)
module_manager.finish_tasks()
created_files = CreatedFiles()
assert created_files.by(module='A') == [target1, target2]
assert created_files.by(module='B') == [target3]
# Now we should be able to cleanup the created files
assert target1.exists()
assert target2.exists()
assert target3.exists()
# First let's see if dry run is respected
created_files.cleanup(module='A', dry_run=True)
assert target1.exists()
assert target2.exists()
assert target3.exists()
assert created_files.by(module='A') == [target1, target2]
assert created_files.by(module='B') == [target3]
# Now see if we can cleanup module A and let B stay intact
created_files.cleanup(module='A')
assert not target1.exists()
assert not target2.exists()
assert target3.exists()
assert created_files.by(module='A') == []
assert created_files.by(module='B') == [target3]
# Now all files should be cleaned
created_files.cleanup(module='B')
assert not target3.exists()
assert created_files.by(module='A') == []
assert created_files.by(module='B') == []
# Let's see if it has been properly persisted too
del created_files
created_files = CreatedFiles()
assert created_files.by(module='A') == []
assert created_files.by(module='B') == []
| [
"[email protected]"
] | |
bd8120b22e58c6e63c7601d35545bfd5546febc3 | f7c5e3f5834206a7b0d1dadd773d1de032f731e7 | /dmerce1/db2HTML.py | ae12d22aacbefa718a58e88f68f863685e00d0f8 | [] | no_license | rbe/dmerce | 93d601462c50dfbbf62b577803ae697d3abde333 | 3cfcae894c165189cc3ff61e27ca284f09e87871 | refs/heads/master | 2021-01-01T17:06:27.872197 | 2012-05-04T07:22:26 | 2012-05-04T07:22:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,326 | py | #!/usr/bin/env python
##
#
# $Author: rb $
# $Revision: 1.2.7.1 $
#
# Revision 1.1 2000-07-13 17:05:50+02 rb
# Initial revision
#
##
#
# I M P O R T M O D U L E S
#
try:
import sys
import aDataStruct
except:
print '[db2HTML: ERROR LOADING MODULES: (%s, %s)]' % (sys.exc_info()[0], sys.exc_info()[1])
sys.exit()
#####################################################################
#
# C L A S S E S
#
#####################################################################
#
# Process <select>-tags
#
class select(aDataStruct.aDS):
#
# Constructor
#
def __init__(self):
# Call constructor of aDS
aDataStruct.aDS.__init__(self)
#
# Generate <option>-fields out of a database table
#
def mkOptions(self, table = '', qSuperSearch = '', optionValue = '', optionName = '', selected = ''):
# Build SQL statement
stmt = 'SELECT %s, %s FROM %s WHERE %s' % (optionValue, optionName, table, self.convertqSuperSearch(qSuperSearch))
# Query database
rowCount = self.SQLAL.query(stmt)
# Check rowCount
if not rowCount:
# Return false
return 0
# Fetch result
result = self.SQLAL.fetchall()
# Generate <option>-fields
optionList = [] # Init list
for i in range(0, len(result)):
# Init <option>-tag
appStr = '<option value="%s"' % result[i][0]
# Check if value should be selected
if selected:
if result[i][0] == selected:
appStr = '%s selected' % appStr
appStr = '%s>%s</option>' % (appStr, result[i][1])
# Append to list
optionList.append(appStr)
# Return list of strings
return optionList
#
# Process boxes
#
class box(aDataStruct.aDS):
#
# Constructor
#
def __init__(self):
# Call constructor of aDS
aDataStruct.aDS.__init__(self)
#
# Return nothing or 'checked'
#
#def isChecked(self, var = 0):
# Check value of 'var'
#if var == 1 or var == 'on':
# Yes it is!
#return ' checked'
#else:
# Return nothing
#return ''
#
# Return '' or 'checked'
#
def isChecked(self, expr = ''):
# If true
if expr:
# Return string 'checked'
return 'checked'
# If false, return nothing
else:
return ''
| [
"[email protected]"
] | |
c65381eabc7b6d43ed0b44838ef96d25e0eb2cd4 | 0c5b9ebee22450c214576f18929436527b26a1b0 | /starfish/core/morphology/Merge/test/test_simple.py | f5e8ab5623aa77a99afea463b53830f01dce2371 | [
"MIT"
] | permissive | spacetx/starfish | 962b4e4a8c0c193acaa84c016a6edaef76c14769 | 853f56c7c02b15397adb921db5e3bde02fdadb63 | refs/heads/master | 2023-03-09T13:51:30.772904 | 2022-09-06T22:16:25 | 2022-09-06T22:16:25 | 92,539,237 | 211 | 75 | MIT | 2023-02-11T01:52:25 | 2017-05-26T18:45:11 | Python | UTF-8 | Python | false | false | 3,186 | py | import numpy as np
import pytest
from starfish.core.morphology.binary_mask import BinaryMaskCollection
from starfish.core.morphology.binary_mask.test.factories import (
binary_arrays_2d,
binary_mask_collection_2d,
binary_mask_collection_3d,
)
from starfish.core.morphology.util import _ticks_equal
from starfish.core.types import Axes, Coordinates
from ..simple import SimpleMerge
def test_success():
mask_collection_0 = binary_mask_collection_2d()
binary_arrays, physical_ticks = binary_arrays_2d()
binary_arrays_negated = [
np.bitwise_not(binary_array)
for binary_array in binary_arrays
]
mask_collection_1 = BinaryMaskCollection.from_binary_arrays_and_ticks(
binary_arrays_negated, None, physical_ticks, None)
merged = SimpleMerge().run([mask_collection_0, mask_collection_1])
assert _ticks_equal(merged._pixel_ticks, mask_collection_0._pixel_ticks)
assert _ticks_equal(merged._physical_ticks, mask_collection_0._physical_ticks)
assert len(mask_collection_0) + len(mask_collection_1) == len(merged)
# go through all the original uncroppped masks, and verify that they are somewhere in the merged
# set.
for mask_collection in (mask_collection_0, mask_collection_1):
for ix in range(len(mask_collection)):
uncropped_original_mask = mask_collection.uncropped_mask(ix)
for jx in range(len(merged)):
uncropped_copy_mask = merged.uncropped_mask(jx)
if uncropped_original_mask.equals(uncropped_copy_mask):
# found the copy, break
break
else:
pytest.fail("could not find mask in merged set.")
def test_pixel_tick_mismatch():
mask_collection_0 = binary_mask_collection_2d()
mask_collection_0._pixel_ticks[Axes.X.value] = np.asarray(
mask_collection_0._pixel_ticks[Axes.X.value]) + 1
binary_arrays, physical_ticks = binary_arrays_2d()
binary_arrays_negated = [
np.bitwise_not(binary_array)
for binary_array in binary_arrays
]
mask_collection_1 = BinaryMaskCollection.from_binary_arrays_and_ticks(
binary_arrays_negated, None, physical_ticks, None)
with pytest.raises(ValueError):
SimpleMerge().run([mask_collection_0, mask_collection_1])
def test_physical_tick_mismatch():
mask_collection_0 = binary_mask_collection_2d()
mask_collection_0._physical_ticks[Coordinates.X] = np.asarray(
mask_collection_0._physical_ticks[Coordinates.X]) + 1
binary_arrays, physical_ticks = binary_arrays_2d()
binary_arrays_negated = [
np.bitwise_not(binary_array)
for binary_array in binary_arrays
]
mask_collection_1 = BinaryMaskCollection.from_binary_arrays_and_ticks(
binary_arrays_negated, None, physical_ticks, None)
with pytest.raises(ValueError):
SimpleMerge().run([mask_collection_0, mask_collection_1])
def test_shape_mismatch():
mask_collection_0 = binary_mask_collection_2d()
mask_collection_1 = binary_mask_collection_3d()
with pytest.raises(ValueError):
SimpleMerge().run([mask_collection_0, mask_collection_1])
| [
"[email protected]"
] | |
476ccba49a18e2d3356d794c26547cdac706b13b | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2344/60581/246336.py | 5e8a23f13ab4797d8c3277856fd1dc425e03eb8f | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,584 | py | import sys
lst = []
for line in sys.stdin:
if line.strip()=="":
break
lst.append(line)
input = []
#่ฏปๅ
ฅๅค็
for i in range(0,len(lst)):
theLine = []
j = 0
while j < len(lst[i]):
str = ''
judgeWord = False
judgeNumber = False
if lst[i][j]>='A' and lst[i][j]<='Z':
judgeWord = True
str += lst[i][j]
while judgeWord:
j += 1
if j == len(lst[i]):
theLine.append(str)
break
if lst[i][j]>='A' and lst[i][j]<='Z':
str += lst[i][j]
else:
judgeWord = False
theLine.append(str)
if lst[i][j]>='0' and lst[i][j]<='9':
judgeNumber = True
str += lst[i][j]
while judgeNumber:
j += 1
if j == len(lst[i]):
theLine.append(int(str))
break
if lst[i][j]>='0' and lst[i][j]<='9':
str += lst[i][j]
else:
judgeNumber = False
theLine.append(int(str))
j += 1
input.append(theLine)
testNumber = input[0][0]
start = 1
count = 0
while count < testNumber:
numbers = input[start][0]
numberList = input[start+1].copy()
switchNumber = input[start+2][0]
outPut1 = numberList[0:switchNumber]
outPut2 = numberList[switchNumber:]
for i in range(0,len(outPut1)):
outPut2.append(outPut1[i])
count += 1
start += 3
for i in range(0,len(outPut2)):
print(outPut2[i],end=" ") | [
"[email protected]"
] | |
46d15fda29e9d529e3b0e67d43c1e56866529bed | d2219febb4727a24856578c4c3963b8c081835cb | /usr/lib/enigma2/python/Plugins/Extensions/BBC-iPlayer-Recorder/__init__.py | abf38987968a5e8ea489beb842b10923f7d25362 | [] | no_license | linuxbox10/BBC-iPlayer-Recorder | 3c465cd6fcc237ba384c8c9b89488d86e3ad3f3b | 069e731b134fa6a08152a90cb122caa1d2800b66 | refs/heads/master | 2021-07-07T08:22:39.817143 | 2017-10-04T15:41:42 | 2017-10-04T15:41:42 | 105,785,840 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 102 | py | # Embedded file name: /usr/lib/enigma2/python/Plugins/Extensions/BBC_iPlayer_Recorder/__init__.py
pass | [
"[email protected]"
] | |
4a4a23e8b49a1a9d7382bc7f3f23df345aef6af1 | 2d5cc685fd861c16a44e6578dff659bc197d44f8 | /ioflo/aid/__init__.py | bc7963f4557e7b278b7b88d81bcbd4bb25ffcb27 | [
"Apache-2.0",
"LicenseRef-scancode-warranty-disclaimer",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | dreamerparadise/ioflo | b642e3325760d124c8c608cefd3fb23c408785ff | 177ac656d7c4ff801aebb0d8b401db365a5248ce | refs/heads/master | 2023-04-03T04:05:24.934544 | 2020-11-19T22:07:49 | 2020-11-19T22:07:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | """
aid package
helper and utility modules
"""
# frequent imports
from .odicting import odict, lodict, modict
from .osetting import oset
from .consoling import getConsole
| [
"[email protected]"
] | |
eb86106a321ead443d3fef7263cc6bf014b7c342 | ec0b8bfe19b03e9c3bb13d9cfa9bd328fb9ca3f1 | /res/packages/scripts/scripts/client/gui/shared/utils/requesters/IntSettingsRequester.py | 5167f471034e02e31839cd698900e678c4a0e863 | [] | no_license | webiumsk/WOT-0.9.20.0 | de3d7441c5d442f085c47a89fa58a83f1cd783f2 | 811cb4e1bca271372a1d837a268b6e0e915368bc | refs/heads/master | 2021-01-20T22:11:45.505844 | 2017-08-29T20:11:38 | 2017-08-29T20:11:38 | 101,803,045 | 0 | 1 | null | null | null | null | WINDOWS-1250 | Python | false | false | 4,446 | py | # 2017.08.29 21:50:18 Stลednรญ Evropa (letnรญ ฤas)
# Embedded file name: scripts/client/gui/shared/utils/requesters/IntSettingsRequester.py
import BigWorld
import copy
import constants
from adisp import async, process
from debug_utils import LOG_ERROR, LOG_WARNING
from gui.shared.utils import code2str
class IntSettingsRequester(object):
"""
Setting dictionary presenting int settings keys by section names.
Don't forget to duplicate new value in common.constanst.INT_USER_SETTINGS_KEYS
"""
SETTINGS = {'VERSION': 0,
'GAME': 1,
'GRAPHICS': 2,
'SOUND': 3,
'CONTROLS': 4,
'AIM_ARCADE_1': 43,
'AIM_ARCADE_2': 44,
'AIM_ARCADE_3': 45,
'AIM_SNIPER_1': 46,
'AIM_SNIPER_2': 47,
'AIM_SNIPER_3': 48,
'MARKERS_ENEMY': 49,
'MARKERS_DEAD': 50,
'MARKERS_ALLY': 51,
'GUI_START_BEHAVIOR': 52,
'FEEDBACK': 53,
'EULA_VERSION': constants.USER_SERVER_SETTINGS.EULA_VERSION,
'GAMEPLAY': 55,
'FORT': 56,
'USERS_STORAGE_REV': 57,
'CONTACTS': 58,
'GAME_EXTENDED': constants.USER_SERVER_SETTINGS.GAME_EXTENDED,
'FALLOUT': 60,
'TUTORIAL': 61,
'AIM_ARCADE_4': 63,
'AIM_SNIPER_4': 64,
'MARKS_ON_GUN': constants.USER_SERVER_SETTINGS.HIDE_MARKS_ON_GUN,
'ONCE_ONLY_HINTS': 70,
'CAROUSEL_FILTER_1': 73,
'CAROUSEL_FILTER_2': 74,
'FALLOUT_CAROUSEL_FILTER_1': 75,
'FALLOUT_CAROUSEL_FILTER_2': 76,
'ENCYCLOPEDIA_RECOMMENDATIONS_1': 77,
'ENCYCLOPEDIA_RECOMMENDATIONS_2': 78,
'ENCYCLOPEDIA_RECOMMENDATIONS_3': 79,
'RANKED_CAROUSEL_FILTER_1': 80,
'RANKED_CAROUSEL_FILTER_2': 81,
'FEEDBACK_DAMAGE_INDICATOR': 82,
'FEEDBACK_DAMAGE_LOG': 83,
'FEEDBACK_BATTLE_EVENTS': 84}
def __init__(self):
self.__cache = dict()
def _response(self, resID, value, callback):
"""
Common server response method. Must be called ANYWAY after
server operation will complete.
@param resID: request result id
@param value: requested value
@param callback: function to be called after operation will complete
"""
if resID < 0:
LOG_ERROR('[class %s] There is error while getting data from cache: %s[%d]' % (self.__class__.__name__, code2str(resID), resID))
return callback(dict())
callback(value)
@async
def _requestCache(self, callback = None):
"""
Request data from server
"""
player = BigWorld.player()
if player is not None and player.intUserSettings is not None:
player.intUserSettings.getCache(lambda resID, value: self._response(resID, value, callback))
else:
LOG_WARNING('Player or intUserSettings is not defined', player, player.intUserSettings if player is not None else None)
return
@async
@process
def request(self, callback = None):
"""
Public request method. Validate player entity to request
possibility and itself as single callback argument.
"""
self.__cache = yield self._requestCache()
callback(self)
def getCacheValue(self, key, defaultValue = None):
"""
Public interface method to get value from cache.
@param key: value's key in cache
@param defaultValue: default value if key does not exist
@return: value
"""
return self.__cache.get(key, defaultValue)
@process
def setSetting(self, key, value):
yield self._addIntSettings({self.SETTINGS[key]: int(value)})
@process
def setSettings(self, settings):
intSettings = dict(map(lambda item: (self.SETTINGS[item[0]], int(item[1])), settings.iteritems()))
yield self._addIntSettings(intSettings)
def getSetting(self, key, defaultValue = None):
return self.getCacheValue(self.SETTINGS[key], defaultValue)
@async
def _addIntSettings(self, settings, callback = None):
import BattleReplay
if not BattleReplay.g_replayCtrl.isPlaying:
self.__cache.update(settings)
BigWorld.player().intUserSettings.addIntSettings(settings, callback)
# okay decompyling c:\Users\PC\wotmods\files\originals\res\packages\scripts\scripts\client\gui\shared\utils\requesters\IntSettingsRequester.pyc
# decompiled 1 files: 1 okay, 0 failed, 0 verify failed
# 2017.08.29 21:50:18 Central Europe (summer time)
| [
"[email protected]"
] | |
0d5612b10b9d4ae1e7f93123afb238011de5556e | f25ffa3370e9fef64d22c1d639051b1dfea249fa | /crypto/test/SConstruct | fdf7840c9447955b9b1d2b2e644bad970526f938 | [] | no_license | sjkingo-archive/comp3301-a2-spec | d809a836ffbea155284c5314e3cb5261483de29b | 754a9cf23e7395961237df6b19303fce49bafdf5 | refs/heads/master | 2016-08-06T19:05:10.194681 | 2015-06-20T00:00:58 | 2015-06-20T00:00:58 | 37,751,102 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 106 | env = Environment(
CFLAGS=['-Wall', '-Wextra', '-g', '-std=gnu99'],
)
env.Program('smode', 'smode.c')
| [
"[email protected]"
] | ||
ec51a2c268481d48cc82a004658d99ace07708a4 | 6610e8b7d715d28df179217b0222b6a74d7b01f3 | /Contents/Libraries/Shared/omdb/api.py | e7c5f60a9f295f608c0cf9f27a8d959315e9eca3 | [] | no_license | BigmIkeX/FMoviesPlus.bundle | f479b96dabbda8a70944a7c44717a9b7a6abc5c0 | 7789e6137df8a86f6c9f9a78fc478ab89696e3d0 | refs/heads/master | 2020-05-22T09:39:12.049063 | 2019-05-09T20:54:25 | 2019-05-09T20:54:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,242 | py | """Public interface for omdb module
Accessible via:
import omdb
"""
from .client import Client
# Internal client instance used for our requests.
_client = Client()
def set_default(key, default):
"""Proxy method to internal client instance that sets default params
values.
"""
_client.set_default(key, default)
def get(**params):
"""Generic request."""
return _client.get(**params)
def search(string, **params):
"""Search by string."""
return get(search=string, **params)
def search_movie(string, **params):
"""Search movies by string."""
params['media_type'] = 'movie'
return search(string, **params)
def search_episode(string, **params):
"""Search episodes by string."""
params['media_type'] = 'episode'
return search(string, **params)
def search_series(string, **params):
"""Search series by string."""
params['media_type'] = 'series'
return search(string, **params)
def imdbid(string, **params):
"""Get by IMDB ID."""
return get(imdbid=string, **params)
def title(string, **params):
"""Get by title."""
return get(title=string, **params)
def request(**params):
"""Lower-level request."""
return _client.request(**params)
| [
"[email protected]"
] | |
a16bd241bd21dd97d1b8a5a7a1517e3416efbaf7 | 6310c834caf9f775d1ddeb0328a41d5648a733fa | /source/news_client/apps.py | d6b5afd22eed6a596ea215f66abe4ab00017b054 | [] | no_license | big-arturka/test_task | 54205245d26c966dd4c3982dc6809d8c989f2cad | 8dc37994754f7ef04c5c697b5090be65f7058ebd | refs/heads/master | 2023-06-06T19:36:38.291673 | 2021-07-08T05:55:32 | 2021-07-08T05:55:32 | 321,126,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 96 | py | from django.apps import AppConfig
class NewsClientConfig(AppConfig):
name = 'news_client'
| [
"[email protected]"
] | |
e80b2c632811677af15c91426fdb116e3238f867 | 1b082458d2815b7671a5d77e809834184e0dabfa | /vcr/request.py | bbe950b3edd673d60637aff3f6c8f797cf48f426 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | afthill/vcrpy | 6e4cc1cb842ddec75b73e579a12cf4ae7fb7ca8a | 5aa2fb017f8aad3455ff3291f2f749a5a45664fe | refs/heads/master | 2022-05-01T04:32:57.896556 | 2014-04-27T22:05:19 | 2014-04-27T22:05:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,456 | py | class Request(object):
def __init__(self, protocol, host, port, method, path, body, headers):
self.protocol = protocol
self.host = host
self.port = port
self.method = method
self.path = path
self.body = body
# make headers a frozenset so it will be hashable
self.headers = frozenset(headers.items())
def add_header(self, key, value):
tmp = dict(self.headers)
tmp[key] = value
self.headers = frozenset(tmp.iteritems())
@property
def url(self):
return "{0}://{1}{2}".format(self.protocol, self.host, self.path)
def __key(self):
return (
self.host,
self.port,
self.method,
self.path,
self.body,
self.headers
)
def __hash__(self):
return hash(self.__key())
def __eq__(self, other):
return hash(self) == hash(other)
def __str__(self):
return "<Request ({0}) {1}>".format(self.method, self.url)
def __repr__(self):
return self.__str__()
def _to_dict(self):
return {
'protocol': self.protocol,
'host': self.host,
'port': self.port,
'method': self.method,
'path': self.path,
'body': self.body,
'headers': self.headers,
}
@classmethod
def _from_dict(cls, dct):
return Request(**dct)
| [
"[email protected]"
] | |
67b2a52ec7eb04fbc8eb8dce6312fd2b614f37a3 | ef2018f6e2c8ccabf89cc5f0780cd8ded8db50c0 | /textreuse/fine_tuning/original_skipthought.py | f9dc21db43a4e8a69e8f8ba58f37585b6f07d8f2 | [] | no_license | emanjavacas/text-reuse | a8d14bfb105b30a3f7cf732f999027186ea899bd | 96e39a2e6982677f7d1f6b0f8397977155481b33 | refs/heads/master | 2022-01-05T22:13:54.800591 | 2019-06-04T19:51:12 | 2019-06-04T19:51:12 | 115,821,869 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,384 | py |
import os
import numpy as np
import skipthoughts
from textreuse.datasets import default_pairs, opusparcus_pairs
from textreuse.datasets import PATHS, OPUSPARCUS_PATH
if __name__ == '__main__':
model = skipthoughts.load_model()
for dataset in PATHS:
for split in PATHS[dataset]:
print(dataset, split)
sents, scores = zip(*default_pairs(PATHS[dataset][split]))
scores = np.array([float(s) for s in scores])
s1, s2 = zip(*sents)
s1, s2 = [' '.join(s) for s in s1], [' '.join(s) for s in s2]
s1, s2 = skipthoughts.encode(model, s1), skipthoughts.encode(model, s2)
with open('{}.{}.npz'.format(dataset.lower(), split), 'wb') as f:
np.savez(f, s1=s1, s2=s2, scores=scores)
for split in ('train', 'test', 'dev'):
print("OPUS: ", split)
sents, scores = zip(*opusparcus_pairs(OPUSPARCUS_PATH, split, maxlines=10000))
s1, s2 = zip(*sents)
s1, s2 = [' '.join(s) for s in s1], [' '.join(s) for s in s2]
s1, s2 = skipthoughts.encode(model, s1), skipthoughts.encode(model, s2)
with open('{}.{}.npz'.format('opusparcus', split), 'wb') as f:
if split == 'train':
np.savez(f, s1=s1, s2=s2)
else:
np.savez(f, s1=s2, s2=s2, scores=np.array([float(s) for s in scores]))
| [
"[email protected]"
] | |
a797ad4aa0901df584d197d3311931220a7fa063 | b38c1d72d7c5d688a5f0942895a5f762880196b6 | /blog/migrations/0001_initial.py | c589b4b16fa74505689fe1d50d1e5effa764d2fc | [] | no_license | zdimon/angular | c8a9874b427ca5bb4899c51c0dc0ba0d96191039 | ea625523761d6b3300c9d9fb3c0aa7070fb634da | refs/heads/master | 2020-05-17T01:28:35.187898 | 2015-07-17T15:29:16 | 2015-07-17T15:29:16 | 38,496,687 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,115 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
]
operations = [
migrations.CreateModel(
name='Post',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('title', models.CharField(max_length=200, verbose_name=b'Title')),
('created_at', models.DateTimeField(auto_now_add=True)),
('content', models.TextField(verbose_name=b'Content', blank=True)),
],
),
migrations.CreateModel(
name='Topic',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=200)),
],
),
migrations.AddField(
model_name='post',
name='topic',
field=models.ForeignKey(verbose_name=b'Topic', to='blog.Topic'),
),
]
| [
"[email protected]"
] | |
595c01bd43d08b6b8b738334caae83010da66da1 | 5a52ccea88f90dd4f1acc2819997fce0dd5ffb7d | /alipay/aop/api/domain/AlipayMarketingDataDashboardApplyModel.py | 25bfe04a2359d1e8304fbfc2eb32f13f51baf790 | [
"Apache-2.0"
] | permissive | alipay/alipay-sdk-python-all | 8bd20882852ffeb70a6e929038bf88ff1d1eff1c | 1fad300587c9e7e099747305ba9077d4cd7afde9 | refs/heads/master | 2023-08-27T21:35:01.778771 | 2023-08-23T07:12:26 | 2023-08-23T07:12:26 | 133,338,689 | 247 | 70 | Apache-2.0 | 2023-04-25T04:54:02 | 2018-05-14T09:40:54 | Python | UTF-8 | Python | false | false | 1,390 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayMarketingDataDashboardApplyModel(object):
def __init__(self):
self._dashboard_ids = None
@property
def dashboard_ids(self):
return self._dashboard_ids
@dashboard_ids.setter
def dashboard_ids(self, value):
if isinstance(value, list):
self._dashboard_ids = list()
for i in value:
self._dashboard_ids.append(i)
def to_alipay_dict(self):
params = dict()
if self.dashboard_ids:
if isinstance(self.dashboard_ids, list):
for i in range(0, len(self.dashboard_ids)):
element = self.dashboard_ids[i]
if hasattr(element, 'to_alipay_dict'):
self.dashboard_ids[i] = element.to_alipay_dict()
if hasattr(self.dashboard_ids, 'to_alipay_dict'):
params['dashboard_ids'] = self.dashboard_ids.to_alipay_dict()
else:
params['dashboard_ids'] = self.dashboard_ids
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayMarketingDataDashboardApplyModel()
if 'dashboard_ids' in d:
o.dashboard_ids = d['dashboard_ids']
return o
| [
"[email protected]"
] | |
c699bc32127b8584ebb9ae1852fe8baa996cea26 | 0f7d186164cafdd90b3e0baae0b5573fead049b4 | /ๅคไปปๅก/็บฟ็จ/07-็บฟ็จๅ
ฑไบซๅ
จๅฑๅ้.py | 5b263873df2b9925669df27515c6809dbc31f50f | [] | no_license | DominicRen/Python-learning | 603b31ff173cbc0782b3cf3da12d5e0834a74402 | a9e7ded5fc0e2a760cee9887d87700769a3b9b6d | refs/heads/master | 2022-12-03T07:43:01.052201 | 2019-04-05T04:10:42 | 2019-04-05T04:10:42 | 179,615,987 | 1 | 2 | null | 2022-11-28T19:13:32 | 2019-04-05T03:54:26 | HTML | UTF-8 | Python | false | false | 490 | py | import threading
import time
# ๅฎไนไธไธชๅ
จๅฑๅ้
g_num = 100
def test1():
global g_num
g_num += 1
print("-----in test1 g_num = %d-----" % g_num)
def test2():
print("-----in test2 g_num = %d-----" % g_num)
def main():
t1 = threading.Thread(target=test1)
t2 = threading.Thread(target=test2)
t1.start()
time.sleep(1)
t2.start()
time.sleep(1)
print("-----in main Thread g_num = %d-----" % g_num)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
7cbfc835d5ee7dd706a39b957c365a77352048c5 | f64069f0dc90aa41947d1deca317aa37ad9c9080 | /CounterCode2015/Campers.py | 1be7dba22916115142a5cf672826cd46dc065798 | [] | no_license | raunaklakhwani/Algorithms | 9fdd7c3f5234b4682690ce3bededa365e75df1e5 | a0b36f4f068c100b17fd27b3ed28816e4f3b4e4f | refs/heads/master | 2021-01-10T19:39:24.120026 | 2015-12-20T14:14:48 | 2015-12-20T14:14:48 | 27,044,549 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 371 | py | # URL : https://www.hackerrank.com/contests/countercode/challenges/campers
from math import ceil
N,K = 8,2
snipers = [2,6]
snipers.sort()
def getBetween(a,b):
#print a,b
n = b-a+1
return int(ceil(n/2.0))
pre = 1
s = 0
for i in snipers:
s += getBetween(pre, i - 2)
#print s
pre = i + 2
s += getBetween(pre, N)
print s + K
| [
"[email protected]"
] | |
d6cac20aa12d749297a858a8968dcdd59024491d | ad846a63f010b808a72568c00de016fbe86d6c35 | /algotradingenv/lib/python3.8/site-packages/IPython/sphinxext/ipython_console_highlighting.py | e07bcc007d0673032ff83deee60ef9272ebd015b | [] | no_license | krishansinghal29/algotrade | 74ee8b1c9113812b1c7c00ded95d966791cf76f5 | 756bc2e3909558e9ae8b2243bb4dabc530f12dde | refs/heads/master | 2023-06-02T01:53:24.924672 | 2021-06-10T09:17:55 | 2021-06-10T09:17:55 | 375,641,074 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | """
reST directive for syntax-highlighting ipython interactive sessions.
"""
from sphinx import highlighting
from IPython.lib.lexers import IPyLexer
def setup(app):
"""Setup as a sphinx extension."""
# This is only a lexer, so adding it below to pygments appears sufficient.
# But if somebody knows what the right API usage should be to do that via
# sphinx, by all means fix it here. At least having this setup.py
# suppresses the sphinx warning we'd get without it.
metadata = {"parallel_read_safe": True, "parallel_write_safe": True}
return metadata
# Register the extension as a valid pygments lexer.
# Alternatively, we could register the lexer with pygments instead. This would
# require using setuptools entrypoints: http://pygments.org/docs/plugins
ipy2 = IPyLexer(python3=False)
ipy3 = IPyLexer(python3=True)
highlighting.lexers["ipython"] = ipy2
highlighting.lexers["ipython2"] = ipy2
highlighting.lexers["ipython3"] = ipy3
| [
"[email protected]"
] | |
bf77e59520837cbd065542476fdad5c4d1c0a67f | d3e723fe5eb20b868ed6bc7e3d228eba368f22ef | /feedly/feeds/redis.py | 03755ba7a7ced91466e476809ae1a508fc9d880f | [
"BSD-3-Clause"
] | permissive | intellisense/Feedly | f33fa42a0f41b73e8e728813ed311d06bd8fb668 | 4c5fb74aee56e5ff382417301a4825b151c474b0 | refs/heads/master | 2020-05-29T11:36:24.157740 | 2014-02-24T16:27:33 | 2014-02-24T16:27:33 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 916 | py | from feedly.feeds.base import BaseFeed
from feedly.storage.redis.activity_storage import RedisActivityStorage
from feedly.storage.redis.timeline_storage import RedisTimelineStorage
from feedly.serializers.activity_serializer import ActivitySerializer
class RedisFeed(BaseFeed):
timeline_storage_class = RedisTimelineStorage
activity_storage_class = RedisActivityStorage
activity_serializer = ActivitySerializer
# : allows you to point to a different redis server as specified in
# : settings.FEEDLY_REDIS_CONFIG
redis_server = 'default'
@classmethod
def get_timeline_storage(cls):
timeline_storage_options = {
'redis_server': cls.redis_server,
}
timeline_storage = cls.timeline_storage_class(
**timeline_storage_options)
return timeline_storage
# : clarify that this feed supports filtering
filtering_supported = True
| [
"[email protected]"
] | |
1e27272a6df616fd8ab1ea720081875c1a06bca0 | 8b280840def0f170a16ea8e0668799e63759e9fc | /react_django/settings.py | 1998a01b5810359170d4c639a48fdfc9254837c0 | [] | no_license | panu2306/React-Django-REST | 0a40830048903a0be8a5d5661ec36e20f8d02656 | e0e5976e2b81a91661fed674fa7e1d8177e093f6 | refs/heads/master | 2021-04-02T01:44:17.179283 | 2020-03-18T12:55:26 | 2020-03-18T12:55:26 | 248,230,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,141 | py | """
Django settings for react_django project.
Generated by 'django-admin startproject' using Django 3.0.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.0/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.0/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+y)6$x%*h(866lbdx$5v2hwrb-9=ive2kf5$-2@kx+v%1o+!y6'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'leads',
'rest_framework',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'react_django.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'react_django.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.0/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/3.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.0/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.0/howto/static-files/
STATIC_URL = '/static/'
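Since 'rest_framework' is listed in INSTALLED_APPS above, here is a hedged sketch of where Django REST Framework defaults would go in this settings module; the values are illustrative, not taken from the original project:
REST_FRAMEWORK = {
    # Illustrative default only; tighten per project policy.
    'DEFAULT_PERMISSION_CLASSES': [
        'rest_framework.permissions.AllowAny',
    ],
}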
| [
"[email protected]"
] | |
9fcfda872969a04db6c4892cc52345f61d0a5a36 | 2ed86a79d0fcd299ad4a01310954c5eddcf01edf | /homeassistant/components/overkiz/climate_entities/atlantic_pass_apc_heating_zone.py | b6835d93ebb01978f226f745fb84a212a4de8ecd | [
"Apache-2.0"
] | permissive | konnected-io/home-assistant | 037f12c87bb79e19220192eb918e49db1b1a8b3e | 2e65b77b2b5c17919939481f327963abdfdc53f0 | refs/heads/dev | 2023-05-11T08:57:41.891518 | 2023-05-07T20:03:37 | 2023-05-07T20:03:37 | 109,931,626 | 24 | 10 | Apache-2.0 | 2023-02-22T06:24:01 | 2017-11-08T05:27:21 | Python | UTF-8 | Python | false | false | 7,956 | py | """Support for Atlantic Pass APC Heating Control."""
from __future__ import annotations
from typing import Any, cast
from pyoverkiz.enums import OverkizCommand, OverkizCommandParam, OverkizState
from homeassistant.components.climate import (
PRESET_AWAY,
PRESET_COMFORT,
PRESET_ECO,
PRESET_HOME,
PRESET_SLEEP,
ClimateEntity,
ClimateEntityFeature,
HVACMode,
)
from homeassistant.const import ATTR_TEMPERATURE, UnitOfTemperature
from ..const import DOMAIN
from ..coordinator import OverkizDataUpdateCoordinator
from ..entity import OverkizEntity
OVERKIZ_TO_HVAC_MODE: dict[str, str] = {
OverkizCommandParam.AUTO: HVACMode.AUTO,
OverkizCommandParam.ECO: HVACMode.AUTO,
OverkizCommandParam.MANU: HVACMode.HEAT,
OverkizCommandParam.HEATING: HVACMode.HEAT,
OverkizCommandParam.STOP: HVACMode.OFF,
OverkizCommandParam.EXTERNAL_SCHEDULING: HVACMode.AUTO,
OverkizCommandParam.INTERNAL_SCHEDULING: HVACMode.AUTO,
OverkizCommandParam.COMFORT: HVACMode.HEAT,
}
HVAC_MODE_TO_OVERKIZ = {v: k for k, v in OVERKIZ_TO_HVAC_MODE.items()}
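# Because several Overkiz modes above map to the same HVACMode, this inversion keeps
# only the last entry per HVAC mode (HVACMode.AUTO -> INTERNAL_SCHEDULING,
# HVACMode.HEAT -> COMFORT, HVACMode.OFF -> STOP).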
PRESET_EXTERNAL = "external"
PRESET_FROST_PROTECTION = "frost_protection"
OVERKIZ_TO_PRESET_MODES: dict[str, str] = {
OverkizCommandParam.OFF: PRESET_ECO,
OverkizCommandParam.STOP: PRESET_ECO,
OverkizCommandParam.MANU: PRESET_COMFORT,
OverkizCommandParam.COMFORT: PRESET_COMFORT,
OverkizCommandParam.ABSENCE: PRESET_AWAY,
OverkizCommandParam.ECO: PRESET_ECO,
OverkizCommandParam.FROSTPROTECTION: PRESET_FROST_PROTECTION,
OverkizCommandParam.EXTERNAL_SCHEDULING: PRESET_EXTERNAL,
OverkizCommandParam.INTERNAL_SCHEDULING: PRESET_HOME,
}
PRESET_MODES_TO_OVERKIZ = {v: k for k, v in OVERKIZ_TO_PRESET_MODES.items()}
OVERKIZ_TO_PROFILE_MODES: dict[str, str] = {
OverkizCommandParam.OFF: PRESET_SLEEP,
OverkizCommandParam.STOP: PRESET_SLEEP,
OverkizCommandParam.ECO: PRESET_ECO,
OverkizCommandParam.ABSENCE: PRESET_AWAY,
OverkizCommandParam.MANU: PRESET_COMFORT,
OverkizCommandParam.DEROGATION: PRESET_COMFORT,
OverkizCommandParam.EXTERNAL_SETPOINT: PRESET_EXTERNAL,
OverkizCommandParam.FROSTPROTECTION: PRESET_FROST_PROTECTION,
OverkizCommandParam.COMFORT: PRESET_COMFORT,
}
OVERKIZ_TEMPERATURE_STATE_BY_PROFILE: dict[str, str] = {
OverkizCommandParam.ECO: OverkizState.CORE_ECO_HEATING_TARGET_TEMPERATURE,
OverkizCommandParam.COMFORT: OverkizState.CORE_COMFORT_HEATING_TARGET_TEMPERATURE,
OverkizCommandParam.DEROGATION: OverkizState.CORE_DEROGATED_TARGET_TEMPERATURE,
}
class AtlanticPassAPCHeatingZone(OverkizEntity, ClimateEntity):
"""Representation of Atlantic Pass APC Heating Zone Control."""
_attr_hvac_modes = [*HVAC_MODE_TO_OVERKIZ]
_attr_preset_modes = [*PRESET_MODES_TO_OVERKIZ]
_attr_supported_features = (
ClimateEntityFeature.TARGET_TEMPERATURE | ClimateEntityFeature.PRESET_MODE
)
_attr_temperature_unit = UnitOfTemperature.CELSIUS
_attr_translation_key = DOMAIN
def __init__(
self, device_url: str, coordinator: OverkizDataUpdateCoordinator
) -> None:
"""Init method."""
super().__init__(device_url, coordinator)
        # The temperature sensor shares the same base_device_url and uses the n+1 index
self.temperature_device = self.executor.linked_device(
int(self.index_device_url) + 1
)
@property
def current_temperature(self) -> float | None:
"""Return the current temperature."""
if temperature := self.temperature_device.states[OverkizState.CORE_TEMPERATURE]:
return cast(float, temperature.value)
return None
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
return OVERKIZ_TO_HVAC_MODE[
cast(str, self.executor.select_state(OverkizState.IO_PASS_APC_HEATING_MODE))
]
@property
def current_heating_profile(self) -> str:
"""Return current heating profile."""
return cast(
str,
self.executor.select_state(OverkizState.IO_PASS_APC_HEATING_PROFILE),
)
async def async_set_heating_mode(self, mode: str) -> None:
"""Set new heating mode and refresh states."""
await self.executor.async_execute_command(
OverkizCommand.SET_PASS_APC_HEATING_MODE, mode
)
if self.current_heating_profile == OverkizCommandParam.DEROGATION:
# If current mode is in derogation, disable it
await self.executor.async_execute_command(
OverkizCommand.SET_DEROGATION_ON_OFF_STATE, OverkizCommandParam.OFF
)
        # We also need to execute these two commands to make it work correctly
await self.executor.async_execute_command(
OverkizCommand.REFRESH_PASS_APC_HEATING_MODE
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_PASS_APC_HEATING_PROFILE
)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
await self.async_set_heating_mode(HVAC_MODE_TO_OVERKIZ[hvac_mode])
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
await self.async_set_heating_mode(PRESET_MODES_TO_OVERKIZ[preset_mode])
@property
def preset_mode(self) -> str:
"""Return the current preset mode, e.g., home, away, temp."""
heating_mode = cast(
str, self.executor.select_state(OverkizState.IO_PASS_APC_HEATING_MODE)
)
if heating_mode == OverkizCommandParam.INTERNAL_SCHEDULING:
# In Internal scheduling, it could be comfort or eco
return OVERKIZ_TO_PROFILE_MODES[
cast(
str,
self.executor.select_state(
OverkizState.IO_PASS_APC_HEATING_PROFILE
),
)
]
return OVERKIZ_TO_PRESET_MODES[heating_mode]
@property
def target_temperature(self) -> float:
"""Return hvac target temperature."""
current_heating_profile = self.current_heating_profile
if current_heating_profile in OVERKIZ_TEMPERATURE_STATE_BY_PROFILE:
return cast(
float,
self.executor.select_state(
OVERKIZ_TEMPERATURE_STATE_BY_PROFILE[current_heating_profile]
),
)
return cast(
float, self.executor.select_state(OverkizState.CORE_TARGET_TEMPERATURE)
)
async def async_set_temperature(self, **kwargs: Any) -> None:
"""Set new temperature."""
temperature = kwargs[ATTR_TEMPERATURE]
if self.hvac_mode == HVACMode.AUTO:
await self.executor.async_execute_command(
OverkizCommand.SET_COMFORT_HEATING_TARGET_TEMPERATURE,
temperature,
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_COMFORT_HEATING_TARGET_TEMPERATURE
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_TARGET_TEMPERATURE
)
else:
await self.executor.async_execute_command(
OverkizCommand.SET_DEROGATED_TARGET_TEMPERATURE,
temperature,
)
await self.executor.async_execute_command(
OverkizCommand.SET_DEROGATION_ON_OFF_STATE,
OverkizCommandParam.ON,
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_TARGET_TEMPERATURE
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_PASS_APC_HEATING_MODE
)
await self.executor.async_execute_command(
OverkizCommand.REFRESH_PASS_APC_HEATING_PROFILE
)
| [
"[email protected]"
] | |
e0077da32f8f0a9b4ddeb55a914be35925f10d0d | 64ec0ce37edfec33e1a7c9f074517c07161dc4b3 | /torch/overrides.py | 049115ebcd779e034d2f864af86cc6a072340b07 | [
"BSD-2-Clause",
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"BSL-1.0",
"Apache-2.0"
] | permissive | Yodai1996/pytorch | 35ffb1c1ba2f77957193e26c6c823daab5270aef | 72274e2a2fd55019ec860e1743dbdc5b0c5a5624 | refs/heads/master | 2023-07-25T01:25:09.684093 | 2021-09-08T07:22:05 | 2021-09-08T07:25:34 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 83,071 | py | """
Python implementation of ``__torch_function__``
While most of the torch API and handling for ``__torch_function__`` happens
at the C++ level, some of the torch API is written in Python so we need
python-level handling for ``__torch_function__`` overrides as well. The main
developer-facing functionality in this file are handle_torch_function and
has_torch_function. See torch/functional.py and test/test_overrides.py
for usage examples.
Note
----
heavily inspired by NumPy's ``__array_function__`` (see:
https://github.com/pytorch/pytorch/issues/24015 and
https://www.numpy.org/neps/nep-0018-array-function-protocol.html
)
If changing this file in a way that can affect ``__torch_function__`` overhead,
please report the benchmarks in ``benchmarks/overrides_benchmark``. See the
instructions in the ``README.md`` in that directory.
"""
import __future__
import collections
import functools
import types
from typing import Dict, Set, List, Any, Callable, Iterable, Type
import torch
from torch._C import (
_has_torch_function, _has_torch_function_unary,
_has_torch_function_variadic, _add_docstr)
__all__ = [
"get_ignored_functions",
"get_overridable_functions",
"get_testing_overrides",
"handle_torch_function",
"has_torch_function",
"is_tensor_like",
"is_tensor_method_or_property",
"wrap_torch_function",
]
@functools.lru_cache(None)
def get_ignored_functions() -> Set[Callable]:
"""
Return public functions that cannot be overridden by ``__torch_function__``.
Returns
-------
Set[Callable]
        A set of functions that are publicly available in the torch API but cannot
be overridden with ``__torch_function__``. Mostly this is because none of the
arguments of these functions are tensors or tensor-likes.
Examples
--------
>>> torch.Tensor.as_subclass in torch.overrides.get_ignored_functions()
True
>>> torch.add in torch.overrides.get_ignored_functions()
False
"""
Tensor = torch.Tensor
return {
torch.typename,
torch.is_tensor,
torch.is_storage,
torch.set_default_tensor_type,
torch.set_rng_state,
torch.get_rng_state,
torch.manual_seed,
torch.initial_seed,
torch.seed,
torch.save,
torch.load,
torch.set_printoptions,
torch.fork,
torch.get_default_dtype,
torch.get_num_interop_threads,
torch.get_num_threads,
torch.init_num_threads,
torch.import_ir_module,
torch.import_ir_module_from_buffer,
torch.is_anomaly_enabled,
torch.is_grad_enabled,
torch.merge_type_from_type_comment,
torch.parse_ir,
torch.parse_schema,
torch.parse_type_comment,
torch.set_anomaly_enabled,
torch.set_flush_denormal,
torch.set_num_interop_threads,
torch.set_num_threads,
torch.wait,
torch.as_tensor,
torch.from_numpy,
torch.get_device,
torch.tensor,
torch.default_generator,
torch.has_cuda,
torch.has_cudnn,
torch.has_lapack,
torch.device,
torch.dtype,
torch.finfo,
torch.has_mkl,
torch.has_mkldnn,
torch.has_openmp,
torch.iinfo,
torch.memory_format,
torch.qscheme,
torch.set_grad_enabled,
torch.no_grad,
torch.enable_grad,
torch.inference_mode,
torch.is_inference_mode_enabled,
torch.layout,
torch.align_tensors,
torch.arange,
torch.as_strided,
torch.bartlett_window,
torch.blackman_window,
torch.broadcast_shapes,
torch.can_cast,
torch.cudnn_affine_grid_generator,
torch.cudnn_batch_norm,
torch.cudnn_convolution,
torch.cudnn_convolution_transpose,
torch.cudnn_convolution_relu,
torch.cudnn_convolution_add_relu,
torch.cudnn_grid_sampler,
torch.cudnn_is_acceptable,
torch.empty,
torch.empty_strided,
torch.empty_quantized,
torch.eye,
torch.fft.fftfreq,
torch.fft.rfftfreq,
torch.from_file,
torch.full,
torch.hamming_window,
torch.hann_window,
torch.kaiser_window,
torch.linspace,
torch.logspace,
torch.mkldnn_adaptive_avg_pool2d,
torch.mkldnn_convolution,
torch.mkldnn_convolution_backward_weights,
torch.mkldnn_max_pool2d,
torch.mkldnn_max_pool3d,
torch.mkldnn_linear_backward_weights,
torch.normal,
torch.ones,
torch.promote_types,
torch.rand,
torch.randn,
torch.randint,
torch.randperm,
torch.range,
torch.result_type,
torch.scalar_tensor,
torch.sparse_coo_tensor,
torch.sparse_csr_tensor,
torch.tril_indices,
torch.triu_indices,
torch.vander,
torch.zeros,
torch._jit_internal.boolean_dispatch,
torch.nn.functional.assert_int_or_pair,
torch.nn.functional.upsample,
torch.nn.functional.upsample_bilinear,
torch.nn.functional.upsample_nearest,
torch.nn.functional.has_torch_function,
torch.nn.functional.has_torch_function_unary,
torch.nn.functional.has_torch_function_variadic,
torch.nn.functional.handle_torch_function,
torch.nn.functional.sigmoid,
torch.nn.functional.hardsigmoid,
torch.nn.functional.tanh,
has_torch_function,
handle_torch_function,
torch.set_autocast_enabled,
torch.is_autocast_enabled,
torch.clear_autocast_cache,
torch.set_autocast_cpu_enabled,
torch.is_autocast_cpu_enabled,
torch.set_autocast_cpu_dtype,
torch.get_autocast_cpu_dtype,
torch.get_autocast_gpu_dtype,
torch.set_autocast_gpu_dtype,
torch.autocast_increment_nesting,
torch.autocast_decrement_nesting,
torch.nn.functional.hardswish,
torch.is_vulkan_available,
torch.are_deterministic_algorithms_enabled,
torch.use_deterministic_algorithms,
torch.unify_type_list,
torch.is_warn_always_enabled,
torch.set_warn_always,
torch.vitals_enabled,
torch.set_vital,
torch.read_vitals,
torch.frombuffer,
Tensor.__delitem__,
Tensor.__dir__,
Tensor.__getattribute__,
Tensor.__init__,
Tensor.__iter__,
Tensor.__init_subclass__,
Tensor.__delattr__,
Tensor.__setattr__,
Tensor.__torch_function__,
Tensor.__new__,
Tensor.__class__,
Tensor.__subclasshook__,
Tensor.as_subclass,
Tensor.reinforce,
Tensor.new,
Tensor.new_tensor,
Tensor.new_empty,
Tensor.new_empty_strided,
Tensor.new_zeros,
Tensor.new_ones,
Tensor.new_full,
Tensor._make_subclass,
Tensor.stride,
Tensor.unflatten,
Tensor.to_sparse_csr,
Tensor._reduce_ex_internal,
Tensor._fix_weakref,
Tensor._python_dispatch.__get__,
Tensor._conj,
Tensor._conj_physical,
Tensor._neg_view,
}
@functools.lru_cache(None)
def get_default_nowrap_functions() -> Set[Callable]:
"""
Return public functions that do not wrap in a subclass when invoked by
the default ``Tensor.__torch_function__`` that preserves subclasses. Typically,
these functions represent field accesses (i.e., retrieving a Tensor that
is stored somewhere on the Tensor) as opposed to computation. Users of
these functions expect object identity to be preserved over multiple accesses
(e.g., ``a.grad is a.grad``) which cannot be upheld if we're wrapping on
the fly every time (furthermore, the tensor stored here might already be
the subclass, in which case wrapping really ought not to happen).
Not ALL property accessors have this property; for example ``Tensor.T`` actually
just creates a new transposed tensor on the fly, and so we SHOULD interpose on
these calls (you need to check the implementation of the function to see if
this is the case or not). Additionally, if a property accessor doesn't return a Tensor,
it doesn't have to be on this list (though it is harmless if it is).
"""
Tensor = torch.Tensor
return {
Tensor._base.__get__,
Tensor.grad.__get__,
Tensor._grad.__get__,
}
@functools.lru_cache(None)
def get_testing_overrides() -> Dict[Callable, Callable]:
"""Return a dict containing dummy overrides for all overridable functions
Returns
-------
Dict[Callable, Callable]
A dictionary that maps overridable functions in the PyTorch API to
lambda functions that have the same signature as the real function
and unconditionally return -1. These lambda functions are useful
for testing API coverage for a type that defines ``__torch_function__``.
Examples
--------
>>> import inspect
>>> my_add = torch.overrides.get_testing_overrides()[torch.add]
>>> inspect.signature(my_add)
<Signature (input, other, out=None)>
"""
    # Every function in the PyTorch API that can be overridden needs an entry
# in this dict.
#
# Optimally we would use inspect to get the function signature and define
# the lambda function procedurally but that is blocked by generating
# function signatures for native kernels that can be consumed by inspect.
# See Issue #28233.
Tensor = torch.Tensor
ret: Dict[Callable, Callable] = {
torch.abs: lambda input, out=None: -1,
torch.absolute: lambda input, out=None: -1,
torch.adaptive_avg_pool1d: lambda input, output_size: -1,
torch.adaptive_max_pool1d: lambda inputs, output_size: -1,
torch.acos: lambda input, out=None: -1,
torch.arccos: lambda input, out=None: -1,
torch.acosh: lambda input, out=None: -1,
torch.arccosh: lambda input, out=None: -1,
torch.add: lambda input, other, out=None: -1,
torch.addbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
torch.addcdiv: lambda input, tensor1, tensor2, value=1, out=None: -1,
torch.addcmul: lambda input, tensor1, tensor2, value=1, out=None: -1,
torch.addmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.addmv: lambda input, mat, vec, beta=1, alpha=1, out=None: -1,
torch.addr: lambda input, vec1, vec2, beta=1, alpha=1, out=None: -1,
torch.affine_grid_generator: lambda theta, size, align_corners: -1,
torch.all: lambda input, dim=None: -1,
        torch.allclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
torch.alpha_dropout: lambda input, p, train, inplace=False: -1,
torch.amax: lambda input, dim=None: -1,
torch.amin: lambda input, dim=None: -1,
torch.aminmax: lambda input, dim=None, keepdim=False, out=None: -1,
torch.angle: lambda input, out=None: -1,
torch.any: lambda input, dim=None, keepdim=False, out=None: -1,
torch.argmax: lambda input: -1,
torch.argmin: lambda input: -1,
torch.argsort: lambda input, dim=None: -1,
torch.asin: lambda input, out=None: -1,
torch._assert_async: lambda input: -1,
torch.arcsin: lambda input, out=None: -1,
torch.asinh: lambda input, out=None: -1,
torch.arcsinh: lambda input, out=None: -1,
torch.atan: lambda input, out=None: -1,
torch.arctan: lambda input, out=None: -1,
torch.atan2: lambda input, other, out=None: -1,
torch.atanh: lambda input, out=None: -1,
torch.arctanh: lambda input, out=None: -1,
torch.atleast_1d: lambda *tensors: -1,
torch.atleast_2d: lambda *tensors: -1,
torch.atleast_3d: lambda *tensors: -1,
torch.avg_pool1d: lambda input, kernel_size, stride=None, padding=0, ceil_mode=False, count_include_pad=True: -1,
torch.baddbmm: lambda input, batch1, batch2, alpha=1, beta=1, out=None: -1,
torch.batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled: -1,
torch.batch_norm_backward_elemt: lambda grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count_tensor: -1,
torch.batch_norm_backward_reduce: lambda grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g: -1,
torch.batch_norm_elemt: lambda input, weight, bias, mean, invstd, eps: -1,
torch.batch_norm_gather_stats: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
torch.batch_norm_gather_stats_with_counts: lambda input, mean, invstd, running_mean, running_var, momentum, eps, count: -1,
torch.batch_norm_stats: lambda input, eps: -1,
torch.batch_norm_update_stats: lambda input, running_mean, running_var, momentum: -1,
torch.bernoulli: lambda input, generator=None, out=None: -1,
torch.bilinear: lambda input1, input2, weight, bias: -1,
torch.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None, reduce=None,
reduction='mean', pos_weight=None: -1),
torch.bincount: lambda input, weights=None, minlength=0: -1,
torch.binomial: lambda count, prob, generator=None: -1,
torch.bitwise_and: lambda input, other, out=None: -1,
torch.bitwise_not: lambda input, out=None: -1,
torch.bitwise_or: lambda input, other, out=None: -1,
torch.bitwise_xor: lambda input, other, out=None: -1,
torch.bitwise_left_shift: lambda input, other, out=None: -1,
torch.bitwise_right_shift: lambda input, other, out=None: -1,
torch.block_diag: lambda *tensors: -1,
torch.bmm: lambda input, mat2, out=None: -1,
torch.broadcast_tensors: lambda *tensors: -1,
torch.broadcast_to: lambda self, size: -1,
torch.bucketize: lambda input, boundaries, out_int32=False, right=False, out=None: -1,
torch.cartesian_prod: lambda *tensors: -1,
torch.cat: lambda tensors, dim=0, out=None: -1,
torch.concat: lambda tensors, dim=0, out=None: -1, # alias for torch.cat
torch.cdist: lambda x1, x2, p=2.0, compute_mode='use_mm_for_euclid_dist_if_necessary': -1,
torch.ceil: lambda input, out=None: -1,
        torch.celu: lambda input, alpha=1., inplace=False: -1,
torch.chain_matmul: lambda *matrices, out=None: -1,
torch.channel_shuffle: lambda input, groups : -1,
torch.cholesky: lambda input, upper=False, out=None: -1,
torch.linalg.cholesky: lambda input, out=None: -1,
torch.linalg.cholesky_ex: lambda input, check_errors=False, out=None: -1,
torch.cholesky_inverse: lambda input, upper=False, out=None: -1,
torch.cholesky_solve: lambda input1, input2, upper=False, out=None: -1,
torch.choose_qparams_optimized: lambda input, numel, n_bins, ratio, bit_width: -1,
torch.chunk: lambda input, chunks, dim=0: -1,
torch.clamp: lambda input, min=None, max=None, out=None: -1,
torch.clip: lambda input, min=None, max=None, out=None: -1,
torch.clamp_min: lambda input, min, out=None: -1,
torch.clamp_max: lambda input, max, out=None: -1,
torch.column_stack: lambda tensors, out=None: -1,
torch.cov: lambda input, correction=1, fweights=None, aweights=None: -1,
torch.clone: lambda input: -1,
torch.combinations: lambda input, r=2, with_replacement=False: -1,
torch.complex: lambda real, imag: -1,
torch.copysign: lambda input, other, out=None: -1,
torch.polar: lambda abs, ang: -1,
torch.linalg.cond: lambda input, ord=None: -1,
torch.conj: lambda input, out=None: -1,
torch.conj_physical: lambda input, out=None: -1,
torch.resolve_conj: lambda input, out=None: -1,
torch.resolve_neg: lambda input, out=None: -1,
torch.constant_pad_nd: lambda input, pad, value=0: -1,
torch.conv1d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.conv2d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
torch.conv3d: lambda input, weight, bias=None, stride=1, padding=0, dilation=1, groups=1: -1,
        torch.convolution: lambda input, weight, bias, stride, padding, dilation, transposed, output_padding, groups: -1,
torch.conv_tbc: lambda input, weight, bias, pad=0: -1,
torch.conv_transpose1d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.conv_transpose2d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.conv_transpose3d: lambda input, weight, bias=None, stride=1, padding=0, output_padding=0, groups=1, dilation=1: -1,
torch.corrcoef: lambda input: -1,
torch.cos: lambda input, out=None: -1,
torch.cosine_embedding_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1,
torch.cosh: lambda input, out=None: -1,
torch.cosine_similarity: lambda x1, x2, dim=1, eps=1e-8: -1,
torch.count_nonzero: lambda input: -1,
torch.cross: lambda input, other, dim=-1, out=None: -1,
torch.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0, reduction='mean',
zero_infinity=False: -1),
torch.cummax: lambda input, dim, out=None: -1,
torch.cummin: lambda input, dim, out=None: -1,
torch.cumprod: lambda input, dim, out=None, dtype=None: -1,
torch.cumsum: lambda input, dim, out=None, dtype=None: -1,
torch.cumulative_trapezoid: lambda y, x=None, dim=-1: -1,
torch.logcumsumexp: lambda input, dim, out=None: -1,
torch.deg2rad: lambda input, out=None: -1,
torch.dequantize: lambda input: -1,
torch.det: lambda input: -1,
torch.linalg.det: lambda input: -1, # alias for torch.det # type: ignore[attr-defined]
torch.detach: lambda input: -1,
torch.diag: lambda input, diagonal=0, out=None: -1,
torch.diag_embed: lambda input, diagonal=0, out=None: -1,
torch.diagflat: lambda input, offset=0: -1,
torch.diff: lambda input, n=1, dim=-1, prepend=None, append=None, out=None: -1,
torch.diagonal: lambda input, offset=0, dim1=0, dim2=1: -1,
torch.digamma: lambda input, out=None: -1,
torch.dist: lambda input, other, p=2: -1,
torch.div: lambda input, other, rounding_mode=None, out=None: -1,
torch.divide: lambda input, other, rounding_mode=None, out=None: -1,
torch.dot: lambda input, other, out=None: -1,
torch.dropout: lambda input, p, train, inplace=False: -1,
torch.dsmm: lambda input, mat2: -1,
torch.hsmm: lambda mat1, mat2: -1,
torch.dsplit: lambda input, indices_or_sections: -1,
torch.dstack: lambda tensors, out=None: -1,
torch.eig: lambda input, eigenvectors=False, out=None: -1,
torch.linalg.eig: lambda input, out=None: -1,
torch.linalg.eigvals: lambda input, out=None: -1,
torch.linalg.eigh: lambda input, UPLO="L", out=None: -1,
torch.linalg.eigvalsh: lambda input, UPLO="L", out=None: -1,
torch.einsum: lambda equation, *operands: -1,
torch.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0, scale_grad_by_freq=False,
sparse=False: -1),
torch.embedding_bag: (lambda input, weight, offsets, max_norm=None, norm_type=2, scale_grad_by_freq=False,
mode='mean', sparse=False, per_sample_weights=None, padding_idx=None: -1),
torch.empty_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.eq: lambda input, other, out=None: -1,
torch.equal: lambda input, other: -1,
torch.erf: lambda input, out=None: -1,
torch.erfc: lambda input, out=None: -1,
torch.erfinv: lambda input, out=None: -1,
torch.exp: lambda input, out=None: -1,
torch.exp2: lambda input, out=None: -1,
torch.expm1: lambda input, out=None: -1,
torch.fake_quantize_per_channel_affine: lambda input, scale, zero_point, axis, quant_min, quant_max: -1,
torch.fake_quantize_per_tensor_affine: lambda input, scale, zero_point, quant_min, quant_max: -1,
torch.fused_moving_avg_obs_fake_quant: (lambda x, observer_on, fake_quant_on, averaging_const, running_min,
running_max, scale, zero_point, quant_min, quant_max, ch_axis,
per_row_fake_quant=False, symmetric_quant=False: -1),
torch.fbgemm_linear_fp16_weight: lambda input, packed_weight, bias: -1,
torch.fbgemm_linear_fp16_weight_fp32_activation: lambda input, packed_weight, bias: -1,
torch.fbgemm_linear_int8_weight: lambda input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias: -1,
torch.fbgemm_linear_int8_weight_fp32_activation: (lambda input, weight, packed, col_offsets, weight_scale,
weight_zero_point, bias: -1),
torch.fbgemm_linear_quantize_weight: lambda input: -1,
torch.fbgemm_pack_gemm_matrix_fp16: lambda input: -1,
torch.fbgemm_pack_quantized_matrix: lambda input, a, b: -1,
torch.feature_alpha_dropout: lambda input, p, train: -1,
torch.feature_dropout: lambda input, p, train: -1,
torch.fft.fft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.ifft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.rfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.irfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.hfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.ihfft: lambda input, n=None, dim=-1, norm=None: -1,
torch.fft.fftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.ifftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.rfftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.irfftn: lambda input, s=None, dim=None, norm=None: -1,
torch.fft.fft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.ifft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.rfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.irfft2: lambda input, s=None, dim=(-2, -1), norm=None: -1,
torch.fft.fftshift: lambda input, dim=None: -1,
torch.fft.ifftshift: lambda input, dim=None: -1,
torch.fix: lambda input, out=None: -1,
torch.flatten: lambda input, start_dim=0, end_dim=-1: -1,
torch.flip: lambda input, dims: -1,
torch.fliplr: lambda input: -1,
torch.flipud: lambda input: -1,
torch.frobenius_norm: lambda input, dim=None, keepdim=False, out=None: -1,
torch.floor: lambda input, out=None: -1,
torch.floor_divide: lambda input, other: -1,
torch.float_power: lambda input, exponent, out=None: -1,
torch.fmod: lambda input, other, out=None: -1,
torch.frac: lambda input, out=None: -1,
torch.frexp: lambda input, out=None: -1,
torch.full_like: lambda input, fill_value, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
torch.lu_unpack: lambda LU_data, LU_pivots, unpack_data=True, unpack_pivots=True: -1,
torch.gather: lambda input, dim, index, out=None, sparse_grad=False: -1,
torch.gcd: lambda input, other, out=None: -1,
torch.ge: lambda input, other, out=None: -1,
torch.greater_equal: lambda input, other, out=None: -1,
torch.geqrf: lambda input, out=None: -1,
torch.i0: lambda input, out=None: -1,
torch.inner: lambda input, other, out=None: -1,
torch.outer: lambda input, vec2, out=None: -1,
torch.ger: lambda input, vec2, out=None: -1, # alias for torch.outer
torch.gradient: lambda input, spacing=None, dim=None, edge_order=1: -1,
torch.grid_sampler: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.grid_sampler_2d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.grid_sampler_3d: lambda input, grid, interpolation_mode, padding_mode, align_corners: -1,
torch.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
        torch.gru: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.gru_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.gt: lambda input, other, out=None: -1,
torch.greater: lambda input, other, out=None: -1,
torch.hardshrink: lambda input, lambd=0.5: -1,
torch.heaviside: lambda input, values, out=None: -1,
torch.hinge_embedding_loss: lambda input, target, margin=1.0, size_average=None, reduce=None, reduction='mean': -1,
torch.histc: lambda input, bins=100, min=0, max=0, out=None: -1,
torch.histogram: lambda input, bins=100, min=None, max=None, weight=None, density=False, out=None: -1,
torch.linalg.householder_product: lambda input, tau: -1,
torch.hspmm: lambda mat1, mat2, out=None: -1,
torch.hsplit: lambda input, indices_or_sections: -1,
torch.hstack: lambda tensors, out=None: -1,
torch.hypot: lambda input, other, out=None: -1,
torch.igamma: lambda input, other, out=None: -1,
torch.igammac: lambda input, other, out=None: -1,
torch.imag: lambda input, out=None: -1,
torch.index_add: lambda input, dim, index, source: -1,
torch.index_copy: lambda input, dim, index, source: -1,
torch.index_put: lambda input, indices, values, accumulate=False: -1,
torch.index_select: lambda input, dim, index, out=None: -1,
torch.index_fill: lambda input, dim, index, value: -1,
torch.isfinite: lambda tensor: -1,
torch.isin: lambda e, te, assume_unique=False, invert=False: -1,
torch.isinf: lambda tensor: -1,
torch.isreal: lambda tensor: -1,
torch.isposinf: lambda input, out=None: -1,
torch.isneginf: lambda input, out=None: -1,
torch.instance_norm: (lambda input, running_mean, running_var, weight, bias, use_input_stats, momentum, eps,
cudnn_enabled: -1),
torch.int_repr: lambda input: -1,
torch.inverse: lambda input, out=None: -1,
torch.linalg.inv: lambda input, out=None: -1,
torch.linalg.inv_ex: lambda input, check_errors=False, out=None: -1,
torch.is_complex: lambda input: -1,
torch.is_conj: lambda input: -1,
torch.is_neg: lambda input: -1,
torch.is_distributed: lambda input: -1,
torch.is_inference: lambda input: -1,
torch.is_floating_point: lambda input: -1,
torch.is_nonzero: lambda input: -1,
torch.is_same_size: lambda input, other: -1,
torch.is_signed: lambda input: -1,
torch.isclose: lambda input, other, rtol=1e-05, atol=1e-08, equal_nan=False: -1,
torch.isnan: lambda input: -1,
torch.istft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
normalized=False, onesided=None, length=None, return_complex=False: -1),
torch.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
torch.kron: lambda input, other: -1,
torch.kthvalue: lambda input, k, dim=None, keepdim=False, out=None: -1,
        torch.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05, cudnn_enabled=True: -1,
torch.lcm: lambda input, other, out=None: -1,
torch.ldexp: lambda input, other, out=None: -1,
torch.le: lambda input, other, out=None: -1,
torch.less_equal: lambda input, other, out=None: -1,
torch.lerp: lambda input, end, weight, out=None: -1,
torch.lgamma: lambda input, out=None: -1,
torch.lobpcg: lambda input, k=None, B=None, X=None, n=None, iK=None, niter=None, tol=None, largest=None, method=None,
tracker=None, ortho_iparams=None, ortho_fparams=None, ortho_bparams=None: -1,
torch.log: lambda input, out=None: -1,
torch.log_softmax: lambda input, dim, dtype=None: -1,
torch.log10: lambda input, out=None: -1,
torch.log1p: lambda input, out=None: -1,
torch.log2: lambda input, out=None: -1,
torch.logaddexp: lambda input, other, out=None: -1,
torch.logaddexp2: lambda input, other, out=None: -1,
torch.logdet: lambda input: -1,
torch.xlogy: lambda x, y, out=None: -1,
torch.logical_and: lambda input, other, out=None: -1,
torch.logical_not: lambda input, out=None: -1,
torch.logical_or: lambda input, other, out=None: -1,
torch.logical_xor: lambda input, other, out=None: -1,
torch.logsumexp: lambda input, names, keepdim=False, out=None: -1,
torch.logit: lambda input, eps=None: -1,
torch.lstm: lambda data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional: -1,
torch.lstm_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.lstsq: lambda input, A, out=None: -1,
torch.lt: lambda input, other, out=None: -1,
torch.less: lambda input, other, out=None: -1,
torch.lu: lambda A, pivot=True, get_infos=False, out=None: -1,
torch.lu_solve: lambda b, LU_data, LU_pivots, out=None: -1,
torch.margin_ranking_loss: lambda input1, input2, target, margin=0, size_average=None, reduce=None, reduction='mean': -1, # type: ignore[attr-defined] # noqa: B950
torch.masked_fill: lambda input, mask, value: -1,
torch.masked_scatter: lambda input, mask, source: -1,
torch.masked_select: lambda input, mask, out=None: -1,
torch.matmul: lambda input, other, out=None: -1,
torch.linalg.matmul: lambda input, other, out=None: -1, # alias for torch.matmul
torch.matrix_power: lambda input, n: -1,
torch.linalg.matrix_power: lambda input, n, out=None: -1,
torch.matrix_rank: lambda input, tol=None, symmetric=False: -1,
torch.linalg.matrix_rank: lambda input, tol=None, hermitian=False: -1,
torch.linalg.multi_dot: lambda tensors, out=None: -1,
torch.matrix_exp: lambda input: -1,
torch.max: lambda input, out=None: -1,
torch.maximum: lambda input, other, out=None: -1,
torch.fmax: lambda input, other, out=None: -1,
torch.max_pool1d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool2d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool3d: lambda input, kernel_size, stride=None, padding=0, dilation=1, ceil_mode=False: -1,
torch.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.mean: lambda input, dim=None: -1,
torch.median: lambda input, dim=None: -1,
torch.nanmedian: lambda input, dim=None: -1,
torch.meshgrid: lambda *tensors, **kwargs: -1,
torch.min: lambda input, out=None: -1,
torch.minimum: lambda input, other, out=None: -1,
torch.fmin: lambda input, other, out=None: -1,
torch.miopen_batch_norm: (lambda input, weight, bias, running_mean, running_var, training,
exponential_average_factor, epsilon: -1),
torch.miopen_convolution: lambda input, weight, bias, padding, stride, dilation, groups, benchmark, deterministic: -1,
torch.miopen_convolution_transpose: (lambda input, weight, bias, padding, output_padding, stride, dilation,
groups, benchmark, deterministic: -1),
torch.miopen_depthwise_convolution: (lambda input, weight, bias, padding, stride, dilation, groups, benchmark,
deterministic: -1),
torch.miopen_rnn: (lambda input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first,
dropout, train, bidirectional, batch_sizes, dropout_state: -1),
torch.mm: lambda input, mat2, out=None: -1,
torch.mode: lambda input, dim=-1, keepdim=False, out=None: -1,
torch.movedim: lambda input, source, destination: -1,
torch.moveaxis: lambda input, source, destination: -1,
torch.msort: lambda input, descending=False, out=None: -1,
torch.mul: lambda input, other, out=None: -1,
torch.multiply: lambda input, other, out=None: -1,
torch.multinomial: lambda input, num_samples, replacement=False, out=None: -1,
torch.mv: lambda input, vec, out=None: -1,
torch.mvlgamma: lambda input, p: -1,
torch.narrow: lambda input, dim, start, length: -1,
torch.narrow_copy: lambda input, dim, start, length: -1,
torch.nan_to_num: lambda input, nan=0.0, posinf=None, neginf=None, out=None: -1,
torch.native_batch_norm: lambda input, weight, bias, running_mean, running_var, training, momentum, eps: -1,
torch.native_layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
torch.native_group_norm: lambda input, weight, bias, N, C, HxW, group, eps: -1,
torch.native_norm: lambda input, p=2: -1,
torch.native_norm: lambda input, p=2, dim=None, keepdim=False, dtype=None: -1,
torch.ne: lambda input, other, out=None: -1,
torch.not_equal: lambda input, other, out=None: -1,
torch.neg: lambda input, out=None: -1,
torch.negative: lambda input, out=None: -1,
torch.nextafter: lambda input, other, out=None: -1,
torch.nn.functional.adaptive_avg_pool2d: lambda input, output_size: -1,
torch.nn.functional.adaptive_avg_pool3d: lambda input, output_size: -1,
torch.nn.functional.adaptive_max_pool1d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool1d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool2d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool2d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool3d: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.adaptive_max_pool3d_with_indices: lambda input, output_size, return_indices=False: -1,
torch.nn.functional.affine_grid: lambda theta, size, align_corners=None: -1,
torch.nn.functional.alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
torch.nn.functional.avg_pool2d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None: -1),
torch.nn.functional.avg_pool3d: (lambda input, kernel_size, stride=None, padding=0, ceil_mode=False,
count_include_pad=True, divisor_override=None: -1),
torch.nn.functional.batch_norm: (lambda input, running_mean, running_var, weight=None, bias=None, training=False,
momentum=0.1, eps=1e-05: -1),
torch.nn.functional.bilinear: lambda input1, input2, weight, bias=None: -1,
torch.nn.functional.binary_cross_entropy: (lambda input, target, weight=None, size_average=None, reduce=None,
reduction="mean": -1),
torch.nn.functional.binary_cross_entropy_with_logits: (lambda input, target, weight=None, size_average=None,
reduce=None, reduction="mean", pos_weight=None: -1),
torch.nn.functional.celu: lambda input, alpha=1.0, inplace=False: -1,
torch.nn.functional.cosine_embedding_loss: (lambda input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.cross_entropy: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction="mean", label_smoothing=0.0: -1),
torch.nn.functional.ctc_loss: (lambda log_probs, targets, input_lengths, target_lengths, blank=0,
reduction='mean', zero_infinity=False: -1),
torch.nn.functional.dropout: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout2d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.dropout3d: lambda input, p=0.5, training=True, inplace=False: -1,
torch.nn.functional.elu: lambda input, alpha=1.0, inplace=False: -1,
torch.nn.functional.embedding: (lambda input, weight, padding_idx=None, max_norm=None, norm_type=2.0,
scale_grad_by_freq=False, sparse=False: -1),
torch.nn.functional.embedding_bag: (lambda input, weight, offsets=None, max_norm=None, norm_type=2,
scale_grad_by_freq=False, mode='mean', sparse=False, per_sample_weights=None,
include_last_offset=False, padding_idx=None: -1),
torch.nn.functional.feature_alpha_dropout: lambda input, p=0.5, training=False, inplace=False: -1,
torch.nn.functional.fold: lambda input, output_size, kernel_size, dilation=1, padding=0, stride=1: -1,
torch.nn.functional.fractional_max_pool2d: (lambda input, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None: -1),
torch.nn.functional.fractional_max_pool2d_with_indices: (
lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
_random_samples=None: -1),
torch.nn.functional.fractional_max_pool3d: (lambda input, kernel_size, output_size=None, output_ratio=None,
return_indices=False, _random_samples=None: -1),
torch.nn.functional.fractional_max_pool3d_with_indices: (
lambda input, kernel_size, output_size=None, output_ratio=None, return_indices=False,
_random_samples=None: -1),
torch.nn.functional.gaussian_nll_loss: lambda input, target, var, full=False, eps=1e-06, reduction='mean': -1,
torch.nn.functional.gelu: lambda input: -1,
torch.nn.functional.glu: lambda input, dim=-1: -1,
torch.nn.functional.grid_sample: lambda input, grid, mode='bilinear', padding_mode='zeros', align_corners=None: -1,
torch.nn.functional.group_norm: lambda input, num_groups, weight=None, bias=None, eps=1e-05: -1,
torch.nn.functional.gumbel_softmax: lambda logits, tau=1, hard=False, eps=1e-10, dim=-1: -1,
torch.nn.functional.hardshrink: lambda input, lambd=0.5: -1,
torch.nn.functional.hardtanh: lambda input, min_val=-1., max_val=1., inplace=False: -1,
torch.nn.functional.hinge_embedding_loss: (lambda input, target, margin=1.0, size_average=None, reduce=None,
reduction='mean': -1),
torch.nn.functional.instance_norm: (lambda input, running_mean=None, running_var=None, weight=None, bias=None,
use_input_stats=True, momentum=0.1, eps=1e-05: -1),
torch.nn.functional.interpolate: (lambda input, size=None, scale_factor=None, mode='nearest', align_corners=None,
recompute_scale_factor=None: -1),
torch.nn.functional.kl_div: lambda input, target, size_average=None, reduce=None, reduction='mean', log_target=False: -1,
torch.nn.functional.l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.layer_norm: lambda input, normalized_shape, weight=None, bias=None, eps=1e-05: -1,
torch.nn.functional.leaky_relu: lambda input, negative_slope=0.01, inplace=False: -1,
torch.nn.functional.linear: lambda input, weight, bias=None: -1,
torch.nn.functional.local_response_norm: lambda input, size, alpha=0.0001, beta=0.75, k=1.0: -1,
torch.nn.functional.log_softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.logsigmoid: lambda input: -1,
torch.nn.functional.lp_pool1d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
torch.nn.functional.lp_pool2d: lambda input, norm_type, kernel_size, stride=None, ceil_mode=False: -1,
torch.nn.functional.margin_ranking_loss: (lambda input1, input2, target, margin=0, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.max_pool1d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False: -1),
torch.nn.functional.max_pool1d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool2d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
ceil_mode=False, return_indices=False: -1),
torch.nn.functional.max_pool2d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool3d: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_pool3d_with_indices: (lambda input, kernel_size, stride=None, padding=0, dilation=1,
return_indices=False, ceil_mode=False: -1),
torch.nn.functional.max_unpool1d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.max_unpool2d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.max_unpool3d: lambda input, indices, kernel_size, stride=None, padding=0, output_size=None: -1,
torch.nn.functional.mse_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.multi_head_attention_forward: (
lambda query, key, value, embed_dim_to_check, num_heads, in_proj_weight, in_proj_bias, bias_k, bias_v,
add_zero_attn, dropout_p, out_proj_weight, out_proj_bias, training=True, key_padding_mask=None,
need_weights=True, attn_mask=None, use_separate_proj_weight=False, q_proj_weight=None, k_proj_weight=None,
v_proj_weight=None, static_k=None, static_v=None: -1),
torch.nn.functional.multi_margin_loss: (lambda input, target, p=1, margin=1.0, weight=None, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.multilabel_margin_loss: (lambda input, target, size_average=None, reduce=None,
reduction='mean': -1),
torch.nn.functional.multilabel_soft_margin_loss: (lambda input, target, weight=None, size_average=None,
reduce=None, reduction='mean': -1),
torch.nn.functional.nll_loss: (lambda input, target, weight=None, size_average=None, ignore_index=-100,
reduce=None, reduction='mean': -1),
torch.nn.functional.normalize: lambda input, p=2, dim=1, eps=1e-12, out=None: -1,
torch.nn.functional.one_hot: lambda tensor, num_classes=-1: -1,
torch.nn.functional.pad: lambda input, pad, mode='constant', value=0: -1,
torch.nn.functional.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
torch.nn.functional.poisson_nll_loss: (lambda input, target, log_input=True, full=False, size_average=None,
eps=1e-08, reduce=None, reduction='mean': -1),
torch.nn.functional.prelu: lambda input, weight: -1,
torch.nn.functional.relu: lambda input, inplace=False: -1,
torch.nn.functional.relu6: lambda input, inplace=False: -1,
torch.nn.functional.rrelu: lambda input, lower=0.125, upper=0.3333333333333333, training=False, inplace=False: -1,
torch.nn.functional.selu: lambda input, inplace=False: -1,
torch.nn.functional.silu: lambda input, inplace=False: -1,
torch.nn.functional.mish: lambda input, inplace=False: -1,
torch.nn.functional.smooth_l1_loss: lambda input, target, size_average=None, reduce=None, reduction='mean', beta=1.: -1,
torch.nn.functional.huber_loss: lambda input, target, reduction='mean', delta=1.: -1,
torch.nn.functional.soft_margin_loss: lambda input, target, size_average=None, reduce=None, reduction='mean': -1,
torch.nn.functional.softmax: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.softmin: lambda input, dim=None, _stacklevel=3, dtype=None: -1,
torch.nn.functional.softplus: lambda input, beta=1, threshold=20: -1,
torch.nn.functional.softshrink: lambda input, lambd=0.5: -1,
torch.nn.functional.softsign: lambda input: -1,
torch.nn.functional.tanhshrink: lambda input: -1,
torch.nn.functional.threshold: lambda input, threshold, value, inplace=False: -1,
torch.nn.functional.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06,
swap=False, size_average=None, reduce=None, reduction='mean': -1),
torch.nn.functional.triplet_margin_with_distance_loss: (lambda anchor, positive, negative, *,
distance_function=None, margin=1.0,
swap=False, reduction='mean': -1),
torch.nn.functional.unfold: lambda input, kernel_size, dilation=1, padding=0, stride=1: -1,
torch.nonzero: lambda input, as_tuple=False: -1,
torch.norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.norm: lambda input, ord=None, dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.vector_norm: lambda input, ord=2, dim=None, keepdim=False, out=None, dtype=None: -1,
torch.linalg.matrix_norm: lambda input, ord='fro', dim=(-2, -1), keepdim=False, out=None, dtype=None: -1,
torch.norm_except_dim: lambda v, pow=2, dim=0: -1,
torch.nuclear_norm: lambda input, p='fro', dim=None, keepdim=False, out=None, dtype=None: -1,
torch.numel: lambda input: -1,
torch.orgqr: lambda input, tau: -1,
torch.ormqr: lambda input, input2, input3, left=True, transpose=False: -1,
torch.pairwise_distance: lambda x1, x2, p=2.0, eps=1e-06, keepdim=False: -1,
torch.permute: lambda self, dim: -1,
torch.pca_lowrank: lambda input, q=None, center=True, niter=2: -1,
torch.pdist: lambda input, p=2: -1,
torch.pinverse: lambda input, rcond=1e-15: -1,
torch.linalg.pinv: lambda input, rcond=1e-15, hermitian=False: -1,
torch.pixel_shuffle: lambda input, upscale_factor: -1,
torch.pixel_unshuffle: lambda input, downscale_factor: -1,
torch.poisson: lambda input, generator=None: -1,
torch.poisson_nll_loss: lambda input, target, log_input, full, eps, reduction: -1,
torch.polygamma: lambda input, n, out=None: -1,
torch.positive: lambda input, out=None: -1,
torch.prelu: lambda input, weight: -1,
torch.ones_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.pow: lambda input, exponent, out=None: -1,
torch.prod: lambda input, dtype=None: -1,
torch.put: lambda input, index, source, accumulate=False: -1,
torch.q_per_channel_axis: lambda input: -1,
torch.q_per_channel_scales: lambda input: -1,
torch.q_per_channel_zero_points: lambda input: -1,
torch.q_scale: lambda input: -1,
torch.q_zero_point: lambda input: -1,
torch.qr: lambda input, some=True, out=None: -1,
torch.linalg.qr: lambda input, mode='reduced', out=None: -1,
torch.quantile: lambda input, q, dim=None, keepdim=False, out=None: -1,
torch.nanquantile: lambda input, q, dim=None, keepdim=False, out=None: -1,
torch.quantize_per_channel: lambda input, scales, zero_points, axis, dtype: -1,
torch.quantize_per_tensor: lambda input, scale, zero_point, dtype: -1,
torch.quantized_batch_norm: lambda input, weight, bias, mean, var, eps, output_scale, output_zero_point: -1,
torch.quantized_gru_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_lstm_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_max_pool1d: (lambda input, kernel_size, stride=tuple(), padding=(0,),
dilation=(1,), ceil_mode=False: -1),
torch.quantized_max_pool2d: (lambda input, kernel_size, stride=tuple(), padding=(0, 0),
dilation=(1, 1), ceil_mode=False: -1),
torch.quantized_rnn_relu_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.quantized_rnn_tanh_cell: (lambda input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih,
col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh: -1),
torch.rad2deg: lambda input, out=None: -1,
torch.rand_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.randint_like: lambda input, high, dtype=None, layout=torch.strided, device=None, requires_grad=False: -1,
torch.randn_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
torch.ravel: lambda input: -1,
torch.real: lambda input, out=None: -1,
torch.vdot: lambda input, other, out=None: -1,
torch.view_as_real: lambda input: -1,
torch.view_as_complex: lambda input: -1,
torch.reciprocal: lambda input, out=None: -1,
torch.relu: lambda input, inplace=False: -1,
torch.remainder: lambda input, other, out=None: -1,
torch.renorm: lambda input, p, dim, maxnorm, out=None: -1,
torch.repeat_interleave: lambda input, dim=None: -1,
torch.reshape: lambda input, shape: -1,
torch.rnn_relu: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.rnn_relu_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.rnn_tanh: lambda input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first: -1,
torch.rnn_tanh_cell: lambda input, hx, w_ih, w_hh, b_ih=None, b_hh=None: -1,
torch.roll: lambda input, shifts, dims=None: -1,
torch.rot90: lambda input, k=1, dims=(0, 1): -1,
torch.round: lambda input, out=None: -1,
torch.row_stack: lambda tensors, out=None: -1, # alias for torch.vstack
torch._rowwise_prune: (lambda weight, mask, compressed_indices_dtype: -1),
torch.rrelu: lambda input, lower=1. / 8, upper=1. / 3, training=False, inplace=False: -1,
torch.rsqrt: lambda input, out=None: -1,
torch.rsub: lambda input, other, alpha=1: -1,
torch.saddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.scatter: lambda input, dim, index, src: -1,
torch.scatter_add: lambda input, dim, index, src: -1,
torch.searchsorted: lambda sorted_sequence, input, out_int32=False, right=False, out=None: -1,
torch.segment_reduce: lambda data, reduce="max", lengths=None, indices=None, axis=0, unsafe=False: -1,
torch.select: lambda input, dim, index: -1,
torch.selu: lambda input, inplace=False: -1,
torch.sigmoid: lambda input, out=None: -1,
torch.sign: lambda input, out=None: -1,
torch.signbit: lambda input, out=None: -1,
torch.sgn: lambda input, out=None: -1,
torch.sin: lambda input, out=None: -1,
torch.sinc: lambda input, out=None: -1,
torch.sinh: lambda input, out=None: -1,
torch.slogdet: lambda input: -1,
torch.linalg.slogdet: lambda input: -1,
torch.smm: lambda input, mat2: -1,
torch.spmm: lambda input, mat2: -1,
torch.softmax: lambda input, dim, dtype=None: -1,
torch.solve: lambda input, A, out=None: -1,
torch.linalg.solve: lambda input, other, out=None: -1,
torch.sort: lambda input, dim=-1, descending=False, *, stable=False, out=None: -1,
torch.split: lambda tensor, split_size_or_sections, dim=0: -1,
torch.split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
torch.sqrt: lambda input, out=None: -1,
torch.square: lambda input, out=None: -1,
torch.squeeze: lambda input, dim=None, out=None: -1,
torch.sspaddmm: lambda input, mat1, mat2, beta=1, alpha=1, out=None: -1,
torch.stack: lambda tensors, dim=0, out=None: -1,
torch.std: lambda input, dim=None: -1,
torch.std_mean: lambda input, dim=None: -1,
torch.stft: (lambda input, n_fft, hop_length=None, win_length=None, window=None, center=True,
pad_mode='reflect', normalized=False, onesided=True, return_complex=None: -1),
torch.sub: lambda input, other, out=None: -1,
torch.subtract: lambda input, other, out=None: -1,
torch.sum: lambda input, dim=None: -1,
torch.nansum: lambda input, dim=None: -1,
torch.svd: lambda input, some=True, compute_uv=True, out=None: -1,
torch.svd_lowrank: lambda input, q=6, niter=2, M=None: -1,
torch.linalg.svd: lambda input, full_matrices=True, out=None: -1,
torch.linalg.svdvals: lambda input, out=None: -1,
torch.symeig: lambda input, eigenvectors=False, upper=True, out=None: -1,
torch.swapaxes: lambda input, dim0, dim1: -1,
torch.swapdims: lambda input, axis0, axis1: -1,
torch.special.entr: lambda input: -1,
torch.special.erf: lambda input: -1,
torch.special.erfc: lambda input: -1,
torch.special.erfcx: lambda input: -1,
torch.special.erfinv: lambda input: -1,
torch.special.exp2: lambda input: -1,
torch.special.expm1: lambda input: -1,
torch.special.expit: lambda input: -1,
torch.special.polygamma: lambda input, n, out=None: -1,
torch.special.digamma: lambda input: -1,
torch.special.psi: lambda input: -1,
torch.special.gammainc: lambda input, other, out=None: -1,
torch.special.gammaincc: lambda input, other, out=None: -1,
torch.special.gammaln: lambda input: -1,
torch.special.i0: lambda input: -1,
torch.special.i0e: lambda input: -1,
torch.special.i1: lambda input: -1,
torch.special.i1e: lambda input: -1,
torch.special.logit: lambda input: -1,
torch.special.logsumexp: lambda input, dim, keepdim=False, out=None: -1,
torch.special.log1p: lambda input: -1,
torch.special.log_softmax: lambda input, dim, dtype=None: -1,
torch.special.round: lambda input: -1,
torch.special.sinc: lambda input: -1,
torch.special.multigammaln: lambda input, p: -1,
torch.special.ndtri: lambda input: -1,
torch.special.ndtr: lambda input: -1,
torch.special.xlogy: lambda input, other, out=None: -1,
torch.special.xlog1py: lambda input, other, out=None: -1,
torch.special.zeta: lambda self, other, out=None: -1,
torch.t: lambda input: -1,
torch.take: lambda input, index: -1,
torch.take_along_dim: lambda input, indices, dim=None, out=None: -1,
torch.tan: lambda input, out=None: -1,
torch.tanh: lambda input, out=None: -1,
torch.linalg.tensorinv: lambda a, ind=2: -1,
torch.linalg.tensorsolve: lambda a, b, dims=None: -1,
torch.tensordot: lambda a, b, dims=2, out=None: -1,
torch.tensor_split: lambda input, indices_or_sections, dim=0: -1,
torch.threshold: lambda input, threshold, value, inplace=False: -1,
torch.tile: lambda input, dims: -1,
torch.topk: lambda input, k, dim=-1, descending=False, out=None: -1,
torch.trace: lambda input: -1,
torch.transpose: lambda input, dim0, dim1: -1,
torch.trapz: lambda y, x=None, dim=-1: -1,
torch.trapezoid: lambda y, x=None, dim=-1: -1,
torch.triangular_solve: lambda input, A, upper=True, transpose=False, unitriangular=False: -1,
torch.tril: lambda input, diagonal=0, out=None: -1,
torch.triplet_margin_loss: (lambda anchor, positive, negative, margin=1.0, p=2, eps=1e-06, swap=False,
size_average=None, reduce=None, reduction='mean': -1),
torch.triu: lambda input, diagonal=0, out=None: -1,
torch.true_divide: lambda input, other: -1,
torch.trunc: lambda input, out=None: -1,
torch.unbind: lambda input, dim=0: -1,
torch.unique: lambda input, sorted=True, return_inverse=False, return_counts=False, dim=None: -1,
torch.unique_consecutive: lambda input, return_inverse=False, return_counts=False, dim=None: -1,
torch.unsafe_chunk: lambda input, chunks, dim=0: -1,
torch.unsafe_split: lambda tensor, split_size_or_sections, dim=0: -1,
torch.unsafe_split_with_sizes: lambda tensor, split_size_or_sections, dim=0: -1,
torch.unsqueeze: lambda input, dim, out=None: -1,
torch.var: lambda input, dim=None: -1,
torch.var_mean: lambda input, dim=None: -1,
torch.vsplit: lambda input, indices_or_sections: -1,
torch.vstack: lambda tensors, out=None: -1,
torch.where: lambda condition, x=None, y=None: -1,
torch.zeros_like: lambda input, dtype=None, layout=None, device=None, requires_grad=False: -1,
Tensor.__floordiv__: lambda self, other: -1,
Tensor.__rfloordiv__: lambda self, other: -1,
Tensor.__ifloordiv__: lambda self, other: -1,
Tensor.__truediv__: lambda self, other: -1,
Tensor.__rtruediv__: lambda self, other: -1,
Tensor.__itruediv__: lambda self, other: -1,
Tensor.__lshift__: lambda self, other: -1,
Tensor.__rlshift__: lambda self, other: -1,
Tensor.__ilshift__: lambda self, other: -1,
Tensor.__rshift__: lambda self, other: -1,
Tensor.__rrshift__: lambda self, other: -1,
Tensor.__irshift__: lambda self, other: -1,
Tensor.__and__: lambda self, other: -1,
Tensor.__or__: lambda self, other: -1,
Tensor.__xor__: lambda self, other: -1,
Tensor.__float__: lambda self: -1,
Tensor.__complex__: lambda self: -1,
Tensor.__array__: lambda self, dtype: -1,
Tensor.__bool__: lambda self: -1,
Tensor.__contains__: lambda self, other: -1,
Tensor.__neg__: lambda self: -1,
Tensor.__invert__: lambda self: -1,
Tensor.__mod__: lambda self, other: -1,
Tensor.__rmod__: lambda self, other: -1,
Tensor.__imod__: lambda self, other: -1,
Tensor.__array_wrap__: lambda self, array: -1,
Tensor.__getitem__: lambda self, idx: -1,
Tensor.__deepcopy__: lambda self, memo: -1,
Tensor.__int__: lambda self: -1,
Tensor.__long__: lambda self: -1,
Tensor.__hash__: lambda self: -1,
Tensor.__index__: lambda self: -1,
Tensor.__len__: lambda self: -1,
Tensor.__format__: lambda self, format_spec: -1,
Tensor.__reduce_ex__: lambda self, proto: -1,
Tensor.__reversed__: lambda self: -1,
Tensor.__repr__: lambda self: -1,
Tensor.__setitem__: lambda self, k, v: -1,
Tensor.__setstate__: lambda self, d: -1,
Tensor.T.__get__: lambda self: -1,
Tensor._backward_hooks.__get__: lambda self: -1,
Tensor._base.__get__: lambda self: -1,
Tensor._cdata.__get__: lambda self: -1,
Tensor.grad.__get__: lambda self: -1,
Tensor._grad.__get__: lambda self: -1,
Tensor._grad_fn.__get__: lambda self: -1,
Tensor.grad_fn.__get__: lambda self: -1,
Tensor._version.__get__: lambda self: -1,
Tensor.data.__get__: lambda self: -1,
Tensor.device.__get__: lambda self: -1,
Tensor.dtype.__get__: lambda self: -1,
Tensor.is_cuda.__get__: lambda self: -1,
Tensor.is_xpu.__get__: lambda self: -1,
Tensor.is_leaf.__get__: lambda self: -1,
Tensor.retains_grad.__get__: lambda self: -1,
Tensor.is_meta.__get__: lambda self: -1,
Tensor.is_mlc.__get__: lambda self: -1,
Tensor.is_ort.__get__: lambda self: -1,
Tensor.is_mkldnn.__get__: lambda self: -1,
Tensor.is_quantized.__get__: lambda self: -1,
Tensor.is_sparse.__get__: lambda self: -1,
Tensor.is_sparse_csr.__get__: lambda self: -1,
Tensor.is_vulkan.__get__: lambda self: -1,
Tensor.layout.__get__: lambda self: -1,
Tensor.name.__get__: lambda self: -1,
Tensor.names.__get__: lambda self: -1,
Tensor.ndim.__get__: lambda self: -1,
Tensor.output_nr.__get__: lambda self: -1,
Tensor.requires_grad.__get__: lambda self: -1,
Tensor.shape.__get__: lambda self: -1,
Tensor.volatile.__get__: lambda self: -1,
Tensor.real.__get__: lambda self: -1,
Tensor.imag.__get__: lambda self: -1,
Tensor.__cuda_array_interface__.__get__: lambda self: -1,
Tensor.type: lambda self, dtype=None, non_blocking=False, **kwargs: -1,
Tensor._coalesced_: lambda self: -1,
Tensor._dimI: lambda self: -1,
Tensor._dimV: lambda self: -1,
Tensor._indices: lambda self: -1,
Tensor._is_view: lambda self: -1,
Tensor._nnz: lambda self: -1,
Tensor.crow_indices: lambda self: -1,
Tensor.col_indices: lambda self: -1,
Tensor._update_names: lambda self, names, inplace: -1,
Tensor._values: lambda self: -1,
Tensor.align_as: lambda self, other: -1,
Tensor.align_to: lambda self, order, ellipsis_idx: -1,
Tensor.apply_: lambda self, callable: -1,
Tensor.as_strided: lambda self, size, stride: -1,
Tensor.as_strided_: lambda self, size, stride: -1,
Tensor.backward: lambda self, gradient=None, retain_graph=None, create_graph=False, inputs=None: -1,
Tensor.bfloat16: lambda self, memory_format=torch.preserve_format: -1,
Tensor.bool: lambda self, memory_format=torch.preserve_format: -1,
Tensor.byte: lambda self, memory_format=torch.preserve_format: -1,
Tensor.char: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cauchy_: lambda self, median=0, sigma=1, *, generator=None: -1,
Tensor.coalesce: lambda self: -1,
Tensor._coalesced_: lambda self, coalesced: -1,
Tensor.contiguous: lambda self, memory_format=torch.contiguous_format: -1,
Tensor.copy_: lambda self, src, non_blocking=False: -1,
Tensor.cpu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cuda: lambda self, memory_format=torch.preserve_format: -1,
Tensor.xpu: lambda self, memory_format=torch.preserve_format: -1,
Tensor.data_ptr: lambda self: -1,
Tensor.dense_dim: lambda self: -1,
Tensor.dim: lambda self: -1,
Tensor.double: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cdouble: lambda self, memory_format=torch.preserve_format: -1,
Tensor.element_size: lambda self: -1,
Tensor.expand: lambda self, size: -1,
Tensor.expand_as: lambda self, other: -1,
Tensor.exponential_: lambda self, lambd=1, *, generator=None: -1,
Tensor.fill_: lambda self, value: -1,
Tensor.fill_diagonal_: lambda self, value: -1,
Tensor.float: lambda self, memory_format=torch.preserve_format: -1,
Tensor.cfloat: lambda self, memory_format=torch.preserve_format: -1,
Tensor.geometric_: lambda self, p, *, generator=None: -1,
Tensor.get_device: lambda self: -1,
Tensor.half: lambda self, memory_format=torch.preserve_format: -1,
Tensor.has_names: lambda self: -1,
Tensor.indices: lambda self: -1,
Tensor.int: lambda self, memory_format=torch.preserve_format: -1,
Tensor.is_coalesced: lambda self: -1,
Tensor.is_contiguous: lambda self: -1,
Tensor.is_inference: lambda self: -1,
Tensor.is_pinned: lambda self: -1,
Tensor.is_set_to: lambda self, tensor: -1,
Tensor.is_shared: lambda self: -1,
Tensor.item: lambda self: -1,
Tensor.log_normal_: lambda self, mean=1, std=2, *, generator=None: -1,
Tensor.log_softmax: lambda self, dim: -1,
Tensor.long: lambda self, memory_format=torch.preserve_format: -1,
Tensor.map_: lambda self, tensor, callable: -1,
Tensor.map2_: lambda self, x, y, callable: -1,
Tensor.mm: lambda self, mat2: -1,
Tensor.narrow_copy: lambda self, dimension, start, length: -1,
Tensor.ndimension: lambda self: -1,
Tensor.nelement: lambda self: -1,
Tensor.normal_: lambda self: -1,
Tensor.numpy: lambda self: -1,
Tensor.permute: lambda self, dim: -1,
Tensor.pin_memory: lambda self: -1,
Tensor.put_: lambda self, indices, tensor, accumulate=False: -1,
Tensor.qscheme: lambda self: -1,
Tensor.random_: lambda self, from_=0, to=None, *, generator=None: -1,
Tensor.record_stream: lambda self, stream: -1,
Tensor.refine_names: lambda self, names: -1,
Tensor.register_hook: lambda self, hook: -1,
Tensor.rename: lambda self, name: -1,
Tensor.repeat: lambda self, *size: -1,
Tensor.requires_grad_: lambda self, requires_grad=True: -1,
Tensor.reshape_as: lambda self, other: -1,
Tensor.resize: lambda self, *size: -1,
Tensor.resize_: lambda self, size: -1,
Tensor.resize_as: lambda self, other: -1,
Tensor.retain_grad: lambda self: -1,
Tensor.set_: lambda self, source=None, storage_offset=0, size=None, stride=None: -1,
Tensor.share_memory_: lambda self: -1,
Tensor.short: lambda self, memory_format=torch.preserve_format: -1,
Tensor.size: lambda self: -1,
Tensor.sparse_dim: lambda self: -1,
Tensor.sparse_mask: lambda self, mask: -1,
Tensor.sparse_resize_: lambda self, size1, size2, dense_dim: -1,
Tensor.sparse_resize_and_clear_: lambda self, size1, size2, dense_dim: -1,
Tensor.sspaddmm: lambda self, mat1, mat2, beta=1, alpha=1, out=None: -1,
Tensor.storage: lambda self: -1,
Tensor.storage_offset: lambda self: -1,
Tensor.storage_type: lambda self: -1,
Tensor.sum_to_size: lambda self, size: -1,
Tensor.tile: lambda self, *reps: -1,
Tensor.to: lambda self, dtype, non_blocking=False, copy=False, memory_format=torch.preserve_format: -1,
Tensor.to_dense: lambda self: -1,
Tensor.to_sparse: lambda self: -1,
Tensor.tolist: lambda self: -1,
Tensor.to_mkldnn: lambda self: -1,
Tensor.type_as: lambda self, other: -1,
Tensor.unfold: lambda self, dimension, size, step: -1,
Tensor.uniform_: lambda self, from_=0, to=1: -1,
Tensor.values: lambda self: -1,
Tensor.view: lambda self, shape: -1,
Tensor.view_as: lambda self, other: -1,
Tensor.zero_: lambda self: -1,
torch.linalg.lstsq: lambda self, b, cond=None, driver=None: -1,
}
ret2 = {}
ignored = get_ignored_functions()
for k, v in ret.items():
# Generate methods like __add__ and add_ by default from add
names = [
k.__name__, # Default method
k.__name__ + "_", # Inplace variant
"__" + k.__name__ + "__", # Dunder method
"__i" + k.__name__ + "__", # Inplace dunder method
"__r" + k.__name__ + "__", # Reverse dunder method
]
if k.__name__.startswith("bitwise_"):
# bitwise_<op> have dunder methods of the form __<op>__
# And so on.
subname = k.__name__[len("bitwise_"):]
names.extend([
"__" + subname + "__",
"__i" + subname + "__",
"__r" + subname + "__"
])
for name in names:
func = getattr(Tensor, name, None)
if callable(func) and func not in ret and func not in ignored:
ret2[func] = v
ret.update(ret2)
return ret
def wrap_torch_function(dispatcher: Callable):
"""Wraps a given function with ``__torch_function__`` -related functionality.
Parameters
----------
dispatcher: Callable
A callable that returns an iterable of Tensor-likes passed into the function.
Note
----
This decorator may reduce the performance of your code. Generally, it's enough to express
your code as a series of functions that, themselves, support __torch_function__. If you
find yourself in the rare situation where this is not the case, e.g. if you're wrapping a
low-level library and you also need it to work for Tensor-likes, then this function is available.
Examples
--------
>>> def dispatcher(a): # Must have the same signature as func
... return (a,)
>>> @torch.overrides.wrap_torch_function(dispatcher)
>>> def func(a): # This will make func dispatchable by __torch_function__
... return a + 0
"""
def inner(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
relevant_args = dispatcher(*args, **kwargs)
if has_torch_function(relevant_args):
return handle_torch_function(func, relevant_args, *args, **kwargs)
return func(*args, **kwargs)
return wrapped
return inner
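# --- Illustrative sketch (added for exposition, not part of the original API surface).
# It shows a wrapped function deferring to a Tensor-like's __torch_function__; the names
# `_demo_scale` and `_DemoLogged` are hypothetical and exist only for this example.
def _demo_wrap_torch_function():
    def dispatcher(a, factor=2):  # must mirror the signature of the wrapped function
        return (a,)

    @wrap_torch_function(dispatcher)
    def _demo_scale(a, factor=2):
        return a * factor

    class _DemoLogged:
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            # Every call routed through the wrapper lands here for Tensor-likes.
            return "intercepted {}".format(func.__name__)

    return _demo_scale(_DemoLogged())  # -> "intercepted _demo_scale"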
def _get_overloaded_args(relevant_args: Iterable[Any]) -> List[Any]:
"""Returns a list of arguments on which to call __torch_function__.
Checks arguments in relevant_args for __torch_function__ implementations,
storing references to the arguments and their types in overloaded_args and
overloaded_types in order of calling precedence. Only distinct types are
considered. If a type is a subclass of another type it will have higher
precedence, otherwise the precedence order is the same as the order of
arguments in relevant_args, that is, from left-to-right in the argument list.
The precedence-determining algorithm implemented in this function is
described in `NEP-0018`_.
See torch::append_overloaded_arg for the equivalent function in the C++
implementation.
Parameters
----------
relevant_args : iterable of array-like
Iterable of array-like arguments to check for __torch_function__
methods.
Returns
-------
overloaded_args : list
Arguments from relevant_args on which to call __torch_function__
methods, in the order in which they should be called.
.. _NEP-0018:
https://numpy.org/neps/nep-0018-array-function-protocol.html
"""
# Runtime is O(num_arguments * num_unique_types)
overloaded_types: Set[Type] = set()
overloaded_args: List[Any] = []
for arg in relevant_args:
arg_type = type(arg)
# We only collect arguments if they have a unique type, which ensures
# reasonable performance even with a long list of possibly overloaded
# arguments.
if (arg_type not in overloaded_types and hasattr(arg_type, '__torch_function__')):
# Create lists explicitly for the first type (usually the only one
# done) to avoid setting up the iterator for overloaded_args.
if overloaded_types:
overloaded_types.add(arg_type)
# By default, insert argument at the end, but if it is
# subclass of another argument, insert it before that argument.
# This ensures "subclasses before superclasses".
index = len(overloaded_args)
for i, old_arg in enumerate(overloaded_args):
if issubclass(arg_type, type(old_arg)):
index = i
break
overloaded_args.insert(index, arg)
else:
overloaded_types = {arg_type}
overloaded_args = [arg]
return overloaded_args
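# --- Illustrative sketch (not part of the original module): subclass instances are placed
# ahead of their parents by _get_overloaded_args, regardless of argument order. The names
# `_DemoBase` / `_DemoChild` are hypothetical.
def _demo_overload_precedence():
    class _DemoBase:
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            return NotImplemented

    class _DemoChild(_DemoBase):
        pass

    ordered = _get_overloaded_args([_DemoBase(), _DemoChild()])
    # The child comes first even though it was passed second ("subclasses before superclasses").
    return [type(arg).__name__ for arg in ordered]  # ['_DemoChild', '_DemoBase']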
def handle_torch_function(
public_api: Callable, relevant_args: Iterable[Any], *args, **kwargs) -> Any:
"""Implement a function with checks for ``__torch_function__`` overrides.
See torch::autograd::handle_torch_function for the equivalent of this
function in the C++ implementation.
Arguments
---------
public_api : function
Function exposed by the public torch API originally called like
``public_api(*args, **kwargs)`` on which arguments are now being
checked.
relevant_args : iterable
Iterable of arguments to check for __torch_function__ methods.
args : tuple
Arbitrary positional arguments originally passed into ``public_api``.
    kwargs : dict
Arbitrary keyword arguments originally passed into ``public_api``.
Returns
-------
object
Result from calling ``implementation`` or an ``__torch_function__``
method, as appropriate.
Raises
------
TypeError : if no implementation is found.
Example
-------
>>> def func(a):
... if type(a) is not torch.Tensor: # This will make func dispatchable by __torch_function__
... return handle_torch_function(func, (a,), a)
... return a + 0
"""
# Check for __torch_function__ methods.
overloaded_args = _get_overloaded_args(relevant_args)
# overloaded_args already have unique types.
types = tuple(map(type, overloaded_args))
# Call overrides
for overloaded_arg in overloaded_args:
# Use `public_api` instead of `implementation` so __torch_function__
# implementations can do equality/identity comparisons.
result = overloaded_arg.__torch_function__(public_api, types, args, kwargs)
if result is not NotImplemented:
return result
func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
raise TypeError("no implementation found for '{}' on types that implement "
'__torch_function__: {}'
.format(func_name, [type(arg) for arg in overloaded_args]))
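# --- Illustrative sketch (not part of the original module): the receiving side of the
# protocol. `_DemoTracer` is a hypothetical Tensor-like that reports which public API
# was dispatched to it instead of computing anything.
def _demo_handle_torch_function():
    class _DemoTracer:
        @classmethod
        def __torch_function__(cls, func, types, args=(), kwargs=None):
            if kwargs is None:
                kwargs = {}
            # Returning anything other than NotImplemented stops the dispatch loop.
            return (func.__name__, len(args), sorted(kwargs))

    tracer = _DemoTracer()
    return handle_torch_function(torch.add, (tracer,), tracer, 1, alpha=2)  # ('add', 2, ['alpha'])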
has_torch_function = _add_docstr(
_has_torch_function,
r"""Check for __torch_function__ implementations in the elements of an iterable.
Considers exact ``Tensor`` s and ``Parameter`` s non-dispatchable.
Arguments
---------
relevant_args : iterable
        Iterable of arguments to check for __torch_function__ methods.
Returns
-------
bool
True if any of the elements of relevant_args have __torch_function__
implementations, False otherwise.
See Also
________
torch.is_tensor_like
Checks if something is a Tensor-like, including an exact ``Tensor``.
"""
)
has_torch_function_unary = _add_docstr(
_has_torch_function_unary,
r"""Special case of `has_torch_function` for single inputs.
Instead of:
`has_torch_function((t,))`
call:
`has_torch_function_unary(t)`
which skips unnecessary packing and unpacking work.
"""
)
has_torch_function_variadic = _add_docstr(
_has_torch_function_variadic,
r"""Special case of `has_torch_function` that skips tuple creation.
This uses the METH_FASTCALL protocol introduced in Python 3.7; for 3.6
    and before it has roughly equivalent performance compared to
`has_torch_function`.
Instead of:
`has_torch_function((a, b))`
call:
`has_torch_function_variadic(a, b)`
which skips unnecessary packing and unpacking work.
"""
)
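# --- Illustrative sketch (not part of the original module): the dispatch preamble that
# overridable functions typically start with, written against the helpers defined above.
# `_demo_axpy` is a hypothetical function name.
def _demo_axpy(input, other, alpha=1):
    if has_torch_function_variadic(input, other):
        return handle_torch_function(_demo_axpy, (input, other), input, other, alpha=alpha)
    return input * alpha + other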
@functools.lru_cache(None)
def get_overridable_functions() -> Dict[Any, List[Callable]]:
"""List functions that are overridable via __torch_function__
Returns
-------
Dict[Any, List[Callable]]
A dictionary that maps namespaces that contain overridable functions
to functions in that namespace that can be overridden.
"""
overridable_funcs = collections.defaultdict(list)
tested_namespaces = [
(torch, torch.__all__ + dir(torch._C._VariableFunctions)),
(torch.functional, torch.functional.__all__),
(torch.nn.functional, dir(torch.nn.functional)),
(torch.Tensor, dir(torch.Tensor)),
(torch.linalg, dir(torch.linalg)),
(torch.fft, dir(torch.fft)),
(torch.special, dir(torch.special)),
]
for namespace, ns_funcs in tested_namespaces:
for func_name in ns_funcs:
# ignore private functions or functions that are deleted in torch.__init__
if namespace is not torch.Tensor:
if func_name.startswith('_'):
continue
elif func_name.endswith('_'):
continue
elif not func_name[0].islower():
continue
elif func_name == 'unique_dim':
continue
else:
func = getattr(namespace, func_name)
if getattr(object, func_name, None) == func:
continue
if func_name == '__weakref__':
continue
func = getattr(namespace, func_name)
if namespace is torch.Tensor and getattr(object, func_name, None) == func:
continue
# ignore re-exported modules
if isinstance(func, types.ModuleType):
continue
# ignore __future__ imports
if isinstance(func, __future__._Feature):
continue
if not callable(func) and hasattr(func, "__get__"):
if func.__get__ in get_ignored_functions():
msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
"but still has an explicit override")
assert func.__get__ not in get_testing_overrides(), msg.format(namespace, func.__name__)
continue
else:
overridable_funcs[func].append(func.__get__)
continue
if not callable(func):
continue
            # cannot be overridden by __torch_function__
if func in get_ignored_functions():
msg = ("{}.{} is in the tuple returned by torch._overrides.get_ignored_functions "
"but still has an explicit override")
assert func not in get_testing_overrides(), msg.format(namespace, func.__name__)
continue
overridable_funcs[namespace].append(func)
return overridable_funcs
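# --- Illustrative sketch (not part of the original module): the mapping returned above is
# keyed by namespace (torch, torch.Tensor, torch.linalg, ...), so coverage tooling can
# iterate it directly.
def _demo_count_overridables():
    funcs = get_overridable_functions()
    return {namespace: len(entries) for namespace, entries in funcs.items()}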
@functools.lru_cache(None)
def _get_tensor_methods() -> Set[Callable]:
""" Returns a set of the overridable methods on ``torch.Tensor`` """
overridable_funcs = get_overridable_functions()
methods = set(overridable_funcs[torch.Tensor])
return methods
def is_tensor_method_or_property(func: Callable) -> bool:
"""
Returns True if the function passed in is a handler for a
method or property belonging to ``torch.Tensor``, as passed
into ``__torch_function__``.
.. note::
For properties, their ``__get__`` method must be passed in.
This may be needed, in particular, for the following reasons:
1. Methods/properties sometimes don't contain a `__module__` slot.
2. They require that the first passed-in argument is an instance
of ``torch.Tensor``.
Examples
--------
>>> is_tensor_method_or_property(torch.Tensor.add)
True
>>> is_tensor_method_or_property(torch.add)
False
"""
return func in _get_tensor_methods() or func.__name__ == "__get__"
def is_tensor_like(inp):
"""
Returns ``True`` if the passed-in input is a Tensor-like.
Currently, this occurs whenever there's a ``__torch_function__``
attribute on the type of the input.
Examples
--------
A subclass of tensor is generally a Tensor-like.
>>> class SubTensor(torch.Tensor): ...
>>> is_tensor_like(SubTensor([0]))
True
Built-in or user types aren't usually Tensor-like.
>>> is_tensor_like(6)
False
>>> is_tensor_like(None)
False
>>> class NotATensor: ...
>>> is_tensor_like(NotATensor())
False
But, they can be made Tensor-like by implementing __torch_function__.
>>> class TensorLike:
... def __torch_function__(self, func, types, args, kwargs):
... return -1
>>> is_tensor_like(TensorLike())
True
"""
return type(inp) is torch.Tensor or hasattr(type(inp), "__torch_function__")
| [
"[email protected]"
] | |
2ea33ffcd0cae4ec18e5222c11a9c44aa4035f04 | ea3799fc5a6a7109c1c8089dc43993ce24a3fa2a | /FCN.py | 368956152f8a4125ed2d103dc0e50a1d8d6a8a28 | [] | no_license | gepu0221/FCN | 92edfc4bddd2ace1e10d2236a2f9a4ed672705c8 | df03409d5974cbec60d3d18853149c838d2f091b | refs/heads/master | 2020-03-16T18:14:40.414050 | 2019-03-11T12:40:44 | 2019-03-11T12:40:44 | 132,865,380 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,367 | py | from __future__ import print_function
import tensorflow as tf
import numpy as np
import time
import TensorflowUtils as utils
import read_MITSceneParsingData as scene_parsing
import datetime
import BatchDatsetReader as dataset
from six.moves import xrange
from label_pred import label_visualize,fit_ellipse
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer("batch_size", "2", "batch size for training")
tf.flags.DEFINE_integer("pred_num","1612","number for prediction")
tf.flags.DEFINE_string("logs_dir", "logs/", "path to logs directory")
#tf.flags.DEFINE_string("data_dir", "Data_zoo/MIT_SceneParsing/", "path to dataset")
#tf.flags.DEFINE_string("data_dir", "crop_save", "path to dataset")
tf.flags.DEFINE_string("data_dir", "image_save", "path to dataset")
tf.flags.DEFINE_float("learning_rate", "1e-4", "Learning rate for Adam Optimizer")
tf.flags.DEFINE_string("model_dir", "Model_zoo/", "Path to vgg model mat")
tf.flags.DEFINE_bool('debug', "False", "Debug mode: True/ False")
tf.flags.DEFINE_string('mode', "train", "Mode train/ test/ visualize")
#tf.flags.DEFINE_string('mode', "visualize", "Mode train/ test/ visualize")
MODEL_URL = 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat'
MAX_ITERATION = int(1e5 + 1)
NUM_OF_CLASSESS = 2
IMAGE_SIZE = 224
def vgg_net(weights, image):
layers = (
'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',
'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',
'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
'relu3_3', 'conv3_4', 'relu3_4', 'pool3',
'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
'relu4_3', 'conv4_4', 'relu4_4', 'pool4',
'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
'relu5_3', 'conv5_4', 'relu5_4'
)
net = {}
current = image
for i, name in enumerate(layers):
kind = name[:4]
if kind == 'conv':
kernels, bias = weights[i][0][0][0][0]
# matconvnet: weights are [width, height, in_channels, out_channels]
# tensorflow: weights are [height, width, in_channels, out_channels]
kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w")
bias = utils.get_variable(bias.reshape(-1), name=name + "_b")
current = utils.conv2d_basic(current, kernels, bias)
elif kind == 'relu':
current = tf.nn.relu(current, name=name)
if FLAGS.debug:
utils.add_activation_summary(current)
elif kind == 'pool':
current = utils.avg_pool_2x2(current)
net[name] = current
return net
def inference(image, keep_prob):
"""
Semantic segmentation network definition
:param image: input image. Should have values in range 0-255
:param keep_prob:
:return:
"""
print("setting up vgg initialized conv layers ...")
model_data = utils.get_model_data(FLAGS.model_dir, MODEL_URL)
mean = model_data['normalization'][0][0][0]
mean_pixel = np.mean(mean, axis=(0, 1))
weights = np.squeeze(model_data['layers'])
processed_image = utils.process_image(image, mean_pixel)
with tf.variable_scope("inference"):
image_net = vgg_net(weights, processed_image)
conv_final_layer = image_net["conv5_3"]
pool5 = utils.max_pool_2x2(conv_final_layer)
W6 = utils.weight_variable([7, 7, 512, 4096], name="W6")
b6 = utils.bias_variable([4096], name="b6")
conv6 = utils.conv2d_basic(pool5, W6, b6)
relu6 = tf.nn.relu(conv6, name="relu6")
if FLAGS.debug:
utils.add_activation_summary(relu6)
relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob)
W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7")
b7 = utils.bias_variable([4096], name="b7")
conv7 = utils.conv2d_basic(relu_dropout6, W7, b7)
relu7 = tf.nn.relu(conv7, name="relu7")
if FLAGS.debug:
utils.add_activation_summary(relu7)
relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob)
W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8")
b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8")
conv8 = utils.conv2d_basic(relu_dropout7, W8, b8)
# annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1")
# now to upscale to actual image size
deconv_shape1 = image_net["pool4"].get_shape()
W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1")
b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1")
conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"]))
fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1")
deconv_shape2 = image_net["pool3"].get_shape()
W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], name="W_t2")
b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2")
conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"]))
fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2")
shape = tf.shape(image)
deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS])
W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3")
b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3")
conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8)
annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction")
return tf.expand_dims(annotation_pred, dim=3), conv_t3
def train(loss_val, var_list):
optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
grads = optimizer.compute_gradients(loss_val, var_list=var_list)
if FLAGS.debug:
# print(len(var_list))
for grad, var in grads:
utils.add_gradient_summary(grad, var)
return optimizer.apply_gradients(grads)
def main(argv=None):
keep_probability = tf.placeholder(tf.float32, name="keep_probabilty")
image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image")
annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation")
pred_annotation, logits = inference(image, keep_probability)
tf.summary.image("input_image", image, max_outputs=2)
tf.summary.image("ground_truth", tf.cast(annotation, tf.uint8), max_outputs=2)
tf.summary.image("pred_annotation", tf.cast(pred_annotation, tf.uint8), max_outputs=2)
#logits:the last layer of conv net
#labels:the ground truth
loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
labels=tf.squeeze(annotation, squeeze_dims=[3]),
name="entropy")))
tf.summary.scalar("entropy", loss)
trainable_var = tf.trainable_variables()
if FLAGS.debug:
for var in trainable_var:
utils.add_to_regularization_and_summary(var)
train_op = train(loss, trainable_var)
print("Setting up summary op...")
summary_op = tf.summary.merge_all()
print("Setting up image reader...")
train_records, valid_records = scene_parsing.my_read_dataset(FLAGS.data_dir)
print('number of train_records',len(train_records))
print('number of valid_records',len(valid_records))
print("Setting up dataset reader")
image_options = {'resize': True, 'resize_size': IMAGE_SIZE}
if FLAGS.mode == 'train':
train_dataset_reader = dataset.BatchDatset(train_records, image_options)
validation_dataset_reader = dataset.BatchDatset(valid_records, image_options)
sess = tf.Session()
print("Setting up Saver...")
saver = tf.train.Saver()
summary_writer = tf.summary.FileWriter(FLAGS.logs_dir, sess.graph)
sess.run(tf.global_variables_initializer())
ckpt = tf.train.get_checkpoint_state(FLAGS.logs_dir)
#if not train,restore the model trained before
if ckpt and ckpt.model_checkpoint_path:
saver.restore(sess, ckpt.model_checkpoint_path)
print("Model restored...")
if FLAGS.mode == "train":
for itr in xrange(MAX_ITERATION):
train_images, train_annotations = train_dataset_reader.next_batch(FLAGS.batch_size)
feed_dict = {image: train_images, annotation: train_annotations, keep_probability: 0.85}
'''pred_annotation_, logits_=sess.run([pred_annotation, logits],feed_dict=feed_dict)
print('shape of pred_anno',pred_annotation_.shape)
print('pred_anno',pred_annotation_)
print('shape of conv3',logits_.shape)
print('logit',logits_)
print('shape of train_annotations',train_annotations.shape)
print('train_annotations',train_annotations)'''
sess.run(train_op, feed_dict=feed_dict)
if itr % 10 == 0:
train_loss, summary_str = sess.run([loss, summary_op], feed_dict=feed_dict)
print("Step: %d, Train_loss:%g" % (itr, train_loss))
summary_writer.add_summary(summary_str, itr)
if itr % 500 == 0:
valid_images, valid_annotations = validation_dataset_reader.next_batch(FLAGS.batch_size)
valid_loss = sess.run(loss, feed_dict={image: valid_images, annotation: valid_annotations,
keep_probability: 1.0})
print("%s ---> Validation_loss: %g" % (datetime.datetime.now(), valid_loss))
saver.save(sess, FLAGS.logs_dir + "model.ckpt", itr)
elif FLAGS.mode == "visualize":
valid_images, valid_annotations = validation_dataset_reader.get_random_batch(FLAGS.batch_size)
t_start = time.time()
pred = sess.run(pred_annotation, feed_dict={image: valid_images, annotation: valid_annotations,
keep_probability: 1.0})
'''t_elapsed = time.time() - t_start
speed = len(pred)/t_elapsed
print('the num of picture',len(pred))
print('speed',speed)'''
valid_annotations = np.squeeze(valid_annotations, axis=3)
pred = np.squeeze(pred, axis=3)
t_elapsed_s = time.time() - t_start
speed_s = len(pred)/t_elapsed_s
print('the num of picture',len(pred))
print('speed_neddle',speed_s)
#fit_ellipse
'''for itr in range(FLAGS.batch_size):
valid_images_=fit_ellipse(valid_images[itr],pred[itr])
utils.save_image(valid_images_.astype(np.uint8), FLAGS.logs_dir, name="inp_" + str(5+itr))
utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name="gt_" + str(5+itr))
utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name="pred_" + str(5+itr))
print("Saved image: %d" % itr)'''
#label_predict_pixel
for itr in range(FLAGS.batch_size):
valid_images_=label_visualize(valid_images[itr],pred[itr])
utils.save_image(valid_images_.astype(np.uint8), FLAGS.logs_dir, name="inp_" + str(5+itr))
utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name="gt_" + str(5+itr))
utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name="pred_" + str(5+itr))
print("Saved image: %d" % itr)
#the origin fcn validation
'''for itr in range(FLAGS.batch_size):
utils.save_image(valid_images[itr].astype(np.uint8), FLAGS.logs_dir, name="inp_" + str(5+itr))
utils.save_image(valid_annotations[itr].astype(np.uint8), FLAGS.logs_dir, name="gt_" + str(5+itr))
utils.save_image(pred[itr].astype(np.uint8), FLAGS.logs_dir, name="pred_" + str(5+itr))
print("Saved image: %d" % itr)'''
t_elapsed_ = time.time() - t_start
speed = len(pred)/t_elapsed_
print('the num of picture',len(pred))
print('speed_neddle',speed)
if __name__ == "__main__":
tf.app.run()
| [
"[email protected]"
] | |
8729474e8b2422a1e50214878c35fd5195d111dc | f4434c85e3814b6347f8f8099c081ed4af5678a5 | /sdk/communication/azure-communication-chat/tests/test_chat_thread_client_e2e_async.py | 69f1236074986d93928e4b4bce029ea46665aab2 | [
"MIT",
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later"
] | permissive | yunhaoling/azure-sdk-for-python | 5da12a174a37672ac6ed8e3c1f863cb77010a506 | c4eb0ca1aadb76ad892114230473034830116362 | refs/heads/master | 2022-06-11T01:17:39.636461 | 2020-12-08T17:42:08 | 2020-12-08T17:42:08 | 177,675,796 | 1 | 0 | MIT | 2020-03-31T20:35:17 | 2019-03-25T22:43:40 | Python | UTF-8 | Python | false | false | 10,433 | py | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
import pytest
import asyncio
import os
from datetime import datetime
from msrest.serialization import TZ_UTC
from azure.communication.administration import CommunicationIdentityClient
from azure.communication.chat.aio import (
ChatClient,
CommunicationUserCredential
)
from azure.communication.chat import (
ChatThreadMember,
ChatMessagePriority
)
from azure.communication.administration._shared.utils import parse_connection_str
from azure_devtools.scenario_tests import RecordingProcessor
from helper import URIIdentityReplacer
from chat_e2e_helper import ChatURIReplacer
from _shared.asynctestcase import AsyncCommunicationTestCase
from _shared.testcase import BodyReplacerProcessor, ResponseReplacerProcessor
class ChatThreadClientTestAsync(AsyncCommunicationTestCase):
def setUp(self):
super(ChatThreadClientTestAsync, self).setUp()
self.recording_processors.extend([
BodyReplacerProcessor(keys=["id", "token", "senderId", "chatMessageId", "nextLink", "members", "multipleStatus", "value"]),
URIIdentityReplacer(),
ResponseReplacerProcessor(keys=[self._resource_name]),
ChatURIReplacer()])
endpoint, _ = parse_connection_str(self.connection_str)
self.endpoint = endpoint
self.identity_client = CommunicationIdentityClient.from_connection_string(self.connection_str)
# create user
self.user = self.identity_client.create_user()
token_response = self.identity_client.issue_token(self.user, scopes=["chat"])
self.token = token_response.token
# create another user
self.new_user = self.identity_client.create_user()
# create ChatClient
self.chat_client = ChatClient(self.endpoint, CommunicationUserCredential(self.token))
def tearDown(self):
super(ChatThreadClientTestAsync, self).tearDown()
# delete created users
if not self.is_playback():
self.identity_client.delete_user(self.user)
self.identity_client.delete_user(self.new_user)
async def _create_thread(self):
# create chat thread
topic = "test topic"
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
members = [ChatThreadMember(
user=self.user,
display_name='name',
share_history_time=share_history_time
)]
self.chat_thread_client = await self.chat_client.create_chat_thread(topic, members)
self.thread_id = self.chat_thread_client.thread_id
async def _send_message(self):
# send a message
priority = ChatMessagePriority.NORMAL
content = 'hello world'
sender_display_name = 'sender name'
create_message_result = await self.chat_thread_client.send_message(
content,
priority=priority,
sender_display_name=sender_display_name)
self.message_id = create_message_result.id
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_update_thread(self):
async with self.chat_client:
await self._create_thread()
topic = "update topic"
async with self.chat_thread_client:
await self.chat_thread_client.update_thread(topic=topic)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
priority = ChatMessagePriority.NORMAL
content = 'hello world'
sender_display_name = 'sender name'
create_message_result = await self.chat_thread_client.send_message(
content,
priority=priority,
sender_display_name=sender_display_name)
self.assertTrue(create_message_result.id)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_get_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
message = await self.chat_thread_client.get_message(self.message_id)
assert message.id == self.message_id
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_list_messages(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
if self.is_live:
await asyncio.sleep(2)
chat_messages = self.chat_thread_client.list_messages(results_per_page=1)
items = []
async for item in chat_messages:
items.append(item)
assert len(items) > 0
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_update_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
content = "updated message content"
await self.chat_thread_client.update_message(self.message_id, content=content)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_delete_message(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
await self.chat_thread_client.delete_message(self.message_id)
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_list_members(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
chat_thread_members = self.chat_thread_client.list_members()
items = []
async for item in chat_thread_members:
items.append(item)
assert len(items) == 1
# delete chat threads
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_add_members(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
new_member = ChatThreadMember(
user=self.new_user,
display_name='name',
share_history_time=share_history_time)
members = [new_member]
await self.chat_thread_client.add_members(members)
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_remove_member(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
# add member first
share_history_time = datetime.utcnow()
share_history_time = share_history_time.replace(tzinfo=TZ_UTC)
new_member = ChatThreadMember(
user=self.new_user,
display_name='name',
share_history_time=share_history_time)
members = [new_member]
await self.chat_thread_client.add_members(members)
# test remove member
await self.chat_thread_client.remove_member(self.new_user)
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_typing_notification(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self.chat_thread_client.send_typing_notification()
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
@pytest.mark.live_test_only
@AsyncCommunicationTestCase.await_prepared_test
async def test_send_read_receipt(self):
async with self.chat_client:
await self._create_thread()
async with self.chat_thread_client:
await self._send_message()
await self.chat_thread_client.send_read_receipt(self.message_id)
if not self.is_playback():
await self.chat_client.delete_chat_thread(self.thread_id)
| [
"[email protected]"
] | |
53d89cc1f1796b277ee380a6a9c9f66fd6838149 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/139/usersdata/247/61611/submittedfiles/diagonaldominante.py | 4c46b3db468eb5120f665f18eaa6860ff491cb18 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 117 | py | # -*- coding: utf-8 -*-
i=1
n=int(input('d'))
cont=0
while i<n:
if i%2==1:
cont=cont+i
    i=i+1  # advance the loop counter; the original never incremented it, so the loop ran forever
    print(cont)
| [
"[email protected]"
] | |
241ee1d4fb08eab48847636ac8aa19413cffb686 | 846b11ccf549aba144c1824a24292a4850860ca7 | /2-EstruturaDeDecisao/5.py | ecaf59269d84595e978da0a147427a07e1c37567 | [] | no_license | paulocesarcsdev/ExerciciosPython | 6d1feff293e7efc4cd3fbc62eee0add93f76db99 | 25bfaa6dc5cb294242e478a2b253a8ca5d9c7078 | refs/heads/master | 2023-05-15T00:53:22.151884 | 2021-06-10T03:04:04 | 2021-06-10T03:04:04 | 337,847,875 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 602 | py | '''
Faรงa um programa para a leitura de duas notas parciais de um aluno. O programa deve calcular a mรฉdia alcanรงada por aluno e apresentar:
A mensagem "Aprovado", se a mรฉdia alcanรงada for maior ou igual a sete;
A mensagem "Reprovado", se a mรฉdia for menor do que sete;
A mensagem "Aprovado com Distinรงรฃo", se a mรฉdia for igual a dez.
'''
notaUm = int(input('Primeira nota: '))
notaDois = int(input('Segunda nota: '))
media = (notaUm + notaDois) / 2
# check the distinction case first so an average of ten prints only one message
if(media == 10):
    print('Aprovado com Distinção')
elif(media >= 7):
    print('Aprovado')
else:
    print('Reprovado')
| [
"[email protected]"
] | |
5b8904fa1f4944088bfa67fd47c8959d3ab4ffb4 | 9ae6ce54bf9a2a86201961fdbd5e7b0ec913ff56 | /google/ads/googleads/v9/resources/types/bidding_data_exclusion.py | 9fc75f0ae1869a171b9aa89c12199bed820c8544 | [
"Apache-2.0"
] | permissive | GerhardusM/google-ads-python | 73b275a06e5401e6b951a6cd99af98c247e34aa3 | 676ac5fcb5bec0d9b5897f4c950049dac5647555 | refs/heads/master | 2022-07-06T19:05:50.932553 | 2022-06-17T20:41:17 | 2022-06-17T20:41:17 | 207,535,443 | 0 | 0 | Apache-2.0 | 2019-09-10T10:58:55 | 2019-09-10T10:58:55 | null | UTF-8 | Python | false | false | 4,930 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.ads.googleads.v9.enums.types import advertising_channel_type
from google.ads.googleads.v9.enums.types import device
from google.ads.googleads.v9.enums.types import seasonality_event_scope
from google.ads.googleads.v9.enums.types import seasonality_event_status
__protobuf__ = proto.module(
package="google.ads.googleads.v9.resources",
marshal="google.ads.googleads.v9",
manifest={"BiddingDataExclusion",},
)
class BiddingDataExclusion(proto.Message):
r"""Represents a bidding data exclusion.
See "About data exclusions" at
https://support.google.com/google-ads/answer/10370710.
Attributes:
resource_name (str):
Immutable. The resource name of the data exclusion. Data
exclusion resource names have the form:
``customers/{customer_id}/biddingDataExclusions/{data_exclusion_id}``
data_exclusion_id (int):
Output only. The ID of the data exclusion.
scope (google.ads.googleads.v9.enums.types.SeasonalityEventScopeEnum.SeasonalityEventScope):
The scope of the data exclusion.
status (google.ads.googleads.v9.enums.types.SeasonalityEventStatusEnum.SeasonalityEventStatus):
Output only. The status of the data
exclusion.
start_date_time (str):
Required. The inclusive start time of the
data exclusion in yyyy-MM-dd HH:mm:ss format.
A data exclusion is backward looking and should
be used for events that start in the past and
end either in the past or future.
end_date_time (str):
Required. The exclusive end time of the data exclusion in
yyyy-MM-dd HH:mm:ss format.
The length of [start_date_time, end_date_time) interval must
be within (0, 14 days].
name (str):
The name of the data exclusion. The name can
be at most 255 characters.
description (str):
The description of the data exclusion. The
description can be at most 2048 characters.
devices (Sequence[google.ads.googleads.v9.enums.types.DeviceEnum.Device]):
If not specified, all devices will be
included in this exclusion. Otherwise, only the
specified targeted devices will be included in
this exclusion.
campaigns (Sequence[str]):
The data exclusion will apply to the campaigns listed when
the scope of this exclusion is CAMPAIGN. The maximum number
of campaigns per event is 2000. Note: a data exclusion with
both advertising_channel_types and campaign_ids is not
supported.
advertising_channel_types (Sequence[google.ads.googleads.v9.enums.types.AdvertisingChannelTypeEnum.AdvertisingChannelType]):
The data_exclusion will apply to all the campaigns under the
listed channels retroactively as well as going forward when
the scope of this exclusion is CHANNEL. The supported
advertising channel types are DISPLAY, SEARCH and SHOPPING.
Note: a data exclusion with both advertising_channel_types
and campaign_ids is not supported.
"""
resource_name = proto.Field(proto.STRING, number=1,)
data_exclusion_id = proto.Field(proto.INT64, number=2,)
scope = proto.Field(
proto.ENUM,
number=3,
enum=seasonality_event_scope.SeasonalityEventScopeEnum.SeasonalityEventScope,
)
status = proto.Field(
proto.ENUM,
number=4,
enum=seasonality_event_status.SeasonalityEventStatusEnum.SeasonalityEventStatus,
)
start_date_time = proto.Field(proto.STRING, number=5,)
end_date_time = proto.Field(proto.STRING, number=6,)
name = proto.Field(proto.STRING, number=7,)
description = proto.Field(proto.STRING, number=8,)
devices = proto.RepeatedField(
proto.ENUM, number=9, enum=device.DeviceEnum.Device,
)
campaigns = proto.RepeatedField(proto.STRING, number=10,)
advertising_channel_types = proto.RepeatedField(
proto.ENUM,
number=11,
enum=advertising_channel_type.AdvertisingChannelTypeEnum.AdvertisingChannelType,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| [
"[email protected]"
] | |
1f6b0223ae64b698f778132e92aa7961497b2301 | 2cd0c89e4002871dddd6d0872c8105b92f730a11 | /Publishing/_old/AssetPublish_v01.py | fbfdff88c9d339803ead8ba1e0e6c2f8535a364f | [] | no_license | zethwillie/froggerPipeline | d130d5d13a30aa085061e5af918f59f1c4184517 | 0ab3a5462b8a3e1f36e21c77df88e1039a7b3e4d | refs/heads/master | 2021-04-29T14:40:13.402162 | 2018-03-14T16:03:34 | 2018-03-14T16:03:34 | 121,779,043 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 13,037 | py | import os
from shutil import copy2 as copy2
import fnmatch
import maya.cmds as cmds
import maya.mel as mel
import Utilities.utilityFunctions as uf
reload(uf)
# the fbx presets
preset="X:/Production/Code/Maya/Tools/PipelineTools/Python/Publishing/Cali_Scale_Cm.fbxexportpreset"
def publish_maya_scene(versionUp=True, origScene=None, *args):
"""
only do for rigging and modeling phase of assets
ARGS:
versionUp (bool): whether to version up the maya scene
origScene (string): the full path to the file to publish
"""
if not origScene:
cmds.warning("assetPublish.publish_maya_scene: You haven't passed in a scene path!")
return(False)
pp = uf.PathParser(origScene)
cmds.file(s=True)
sel = cmds.ls(sl=True)
if not sel:
cmds.warning("assetManager.publish_maya_scene: You haven't selected anything in your scene. Please select what you wish to publish. (hint: for models, your geo/geo grp. For rigs, usually it will be your char group and ctrl set)")
return(False)
if versionUp:
verNum = int(origScene[-7:-3])
pp.get_version_info()
nums = pp.versionNumbers
newNum = nums[-1]+1
verUpFile = "{0}{1}{2}".format(origScene[:-7], str(newNum).zfill(4),".mb")
# parse the new path/name for publish(current scene)
pubPath = uf.fix_path(os.path.join(pp.phasePath, "Publish/MB/"))
tokens = pp.fileName.split("_")
tokens[-2] = "Publish"
pubFileName = "_".join(tokens)
pubFilePath = uf.fix_path(os.path.join(pubPath, pubFileName))
if os.path.isfile(pubFilePath):
overwrite = cmds.confirmDialog(title="Overwrite Confirmation", message = "A publish MB already exists for this file.\nShould we overwrite?", button = ("Overwrite", "Cancel"), defaultButton = "Overwrite", cancelButton = "Cancel", dismissString = "Cancel")
if overwrite == "Cancel":
print "Publish skipped (no overwrite) for maya file (.mb) stage of {0}".format(pubFilePath)
return(True)
if versionUp:
# in background copy the orig to the new version
# (essentially just versioning up while staying in old version)
copy2(origScene, verUpFile)
print "===== Versioned up {0} to {1}!".format(origScene, verUpFile)
else:
print "===== Not versioning up publish of {0}".format(origScene)
# export selection to publish file
print "===== AssetPublish.publish_maya_scene: Preparing to export items to publish file: {0}".format(sel)
export = cmds.file(pubFilePath, exportSelected=True, type="mayaBinary")
if export==pubFilePath:
print "===== AssetPublish.publish_maya_scene: Succesfully published file to: {0}".format(pubFilePath)
return(True)
def publish_fbx_rig_file(versionUp=True, origScene=None, *args):
"""
requires an EXPORT_JNT_Grp group with one root for each export rig named: 'name_Root_Jnt'
requires a GEO group with one folder for each export rig named: 'name_Geo_Grp'
names should correspond ("fish_Root_Jnt", "fish_Geo_Grp")
"""
# all happens in current:
if not origScene:
cmds.warning("assetPublish.publish_fbx_rig_file: You haven't passed in a scene path!")
return(False)
pp = uf.PathParser(origScene)
geoGrp = cmds.ls("GEO")
jntGrp = cmds.ls("EXPORT_JNT_Grp")
# check for geo grps
if not geoGrp or len(geoGrp)>1:
cmds.warning("AssetPublish.publish_fbx_rig_file:You either have no grp called 'GEO', or too many objects called 'GEO'.\n fbx export aborted!")
return(False)
geos = child_match_check(geoGrp, "*_Geo_Grp")
if not geos:
return(False)
# check for jnt grps
if not jntGrp or len(jntGrp)>1:
cmds.warning("AssetPublish.publish_fbx_rig_file:You either have no grp called 'EXPORT_JNT_Grp', or too many objects called 'EXPORT_JNT_Grp'.\n fbx export aborted!")
return(False)
roots = child_match_check(jntGrp, "*_Root_Jnt")
if not roots:
return(False)
# check correspondence
correspond = check_correspondence(geos, roots)
if not correspond:
return(False)
pubFbxPath = uf.fix_path(os.path.join(pp.phasePath, "Publish/FBX/"))
tokens = pp.fileName.split("_")
tokens[-2] = "Publish"
pubFileName = "_".join(tokens)[:-3] + ".fbx"
pubFilePath = uf.fix_path(os.path.join(pubFbxPath, pubFileName))
# check if there's any animation in the file (time based), abort if there is
# check for references, etc. .
# delete constraints
cmds.delete(cmds.ls(type="constraint"))
cmds.select(cl=True)
# move jnts and geo into world parent
for root in roots:
basename = root.split("_Root_Jnt")[0]
geo = "{0}_Geo_Grp".format(basename)
cmds.parent([geo, root], w=True)
# create filename
tokens[-2] = basename
pubFileName = "_".join(tokens)[:-3] + ".fbx"
pubFilePath = uf.fix_path(os.path.join(pubFbxPath, pubFileName))
cmds.select([root, geo], r=True)
# if this exists, should we overwrite?
if os.path.isfile(pubFilePath):
overwrite = cmds.confirmDialog(title="Overwrite Confirmation", message = "A publish FBX already exists for this file.\nShould we overwrite?", button = ("Overwrite", "Cancel"), defaultButton = "Overwrite", cancelButton = "Cancel", dismissString = "Cancel")
if overwrite == "Cancel":
print "Publish skipped for FBX file (.fbx) called {0}".format(pubFilePath)
return(True)
mel.eval('FBXLoadExportPresetFile -f "{0}";'.format(preset))
mel.eval('FBXExport -f "{0}" -s'.format(pubFilePath))
return(True)
def check_correspondence(geoList, jntList):
"""
checks that a geo grp exists for each root jnt grp and visa versa
"""
for jnt in jntList:
name = jnt.split("_Root_Jnt")[0]
if not cmds.objExists("{0}_Geo_Grp".format(name)):
cmds.warning("AssetPublish.publish_fbx_file:Couldn't find corresponding jnt and geo grps for: {0}! Aborting!".format(name))
return(False)
for geo in geoList:
name = geo.split("_Geo_Grp")[0]
if not cmds.objExists("{0}_Root_Jnt".format(name)):
cmds.warning("AssetPublish.publish_fbx_file:Couldn't find corresponding jnt and geo grps for: {0}! Aborting!".format(name))
return(False)
return(True)
def publish_fbx_anim_file(versionUp=True, origScene=None, *args):
# THIS IS FOR ANIM EXPORTING
if not origScene:
cmds.warning("assetPublish.publish_fbx_anim_file: You haven't passed in a scene path!")
return(False)
# assuming references
refs = cmds.file(q=True, r=True)
if not refs:
cmds.warning("There are no references in this scene. . .")
return(False)
if len(refs) > 1:
cmds.warning("There are too many references in this scene. . .")
return(False)
# below would all be under a for loop for each reference in the stages?
pp = uf.PathParser(origScene)
# assuming a namespace
geoGrp = cmds.ls("*:GEO")
jntGrp = cmds.ls("*:EXPORT_JNT_Grp")
# check for geo grps
if not geoGrp or len(geoGrp)>1:
cmds.warning("AssetPublish.publish_fbx_anim_file:You either have no grp called 'GEO' -IN A NAMESPACE-, or too many objects called 'GEO'.\n fbx export aborted!")
return(False)
geos = child_match_check(geoGrp[0], "*_Geo_Grp")
if not geos:
return(False)
# check for jnt grps
if not jntGrp or len(jntGrp)>1:
cmds.warning("AssetPublish.publish_fbx_anim_file:You either have no grp called 'EXPORT_JNT_Grp' -IN A NAMESPACE-, or too many objects called 'EXPORT_JNT_Grp'.\n fbx export aborted!")
return(False)
roots = child_match_check(jntGrp[0], "*_Root_Jnt")
if not roots:
return(False)
cmds.file(refs[0], ir=True)
# check correspondence of geo and root jnts
correspond = check_correspondence(geos, roots)
if not correspond:
return(False)
pubFbxPath = uf.fix_path(os.path.join(pp.phasePath, "Publish/FBX/"))
tokens = pp.fileName.split("_")
tokens[-2] = "Publish"
pubFileName = "_".join(tokens)[:-3] + ".fbx"
pubFilePath = uf.fix_path(os.path.join(pubFbxPath, pubFileName))
# bake joints
for r in roots:
# get child roots if joints
allD = cmds.listRelatives(r, allDescendents=True)
jnts = [x for x in allD if cmds.objectType(x, isa="joint")]
        # prep for baking: make each joint's transform channels keyable
for j in jnts:
attr=["t","r","s"]
co=["x","y","z"]
attrLs=[]
for at in attr:
for c in co:
attrLs.append("%s.%s%s"%(j,at,c))
for x in attrLs:
try:
                    cmds.setAttr(x, k=1)
except:
pass
namespace = cmds.file(refs[0], q=True, ns=True)
uf.remove_namespaces()
# delete constraints
cmds.delete(cmds.ls(type="constraint"))
cmds.select(cl=True)
# move jnts and geo into world parent
for root in roots:
basename = root.split("_Root_Jnt")[0]
geo = "{0}_Geo_Grp".format(basename)
cmds.parent([geo, root], w=True)
# create filename
tokens[-2] = basename
pubFileName = "_".join(tokens)[:-3] + ".fbx"
pubFilePath = uf.fix_path(os.path.join(pubFbxPath, pubFileName))
cmds.select([root, geo], r=True)
# if this exists, should we overwrite?
if os.path.isfile(pubFilePath):
overwrite = cmds.confirmDialog(title="Overwrite Confirmation", message = "A publish FBX already exists for this file.\nShould we overwrite?", button = ("Overwrite", "Cancel"), defaultButton = "Overwrite", cancelButton = "Cancel", dismissString = "Cancel")
if overwrite == "Cancel":
print "Publish skipped for FBX file (.fbx) called {0}".format(pubFilePath)
return(True)
mel.eval('FBXLoadExportPresetFile -f "{0}";'.format(preset))
mel.eval('FBXExport -f "{0}" -s'.format(pubFilePath))
return(True)
# loop through namespaces/references:
# import reference
# delete namespace
# loop through joint grps, geo grps:
# bake jnts
# clean up joints shit
# "CloudStage_Tree_main_Rig1_A_v0001.fbx"
# stage asset ns subgrp version
# check for version folder, create it if it doesn't exist
pass
def child_match_check(topNode, childString, *args):
children = cmds.listRelatives(topNode, c=True)
if not children:
cmds.warning("AssetPublish.child_match_check:Couldn't find anything under {0}! Aborting!".format(topNode))
return(None)
goodChildren = fnmatch.filter(children, childString)
if not goodChildren:
cmds.warning("AssetPublish.child_match_check:Couldn't find objects with '{0}' under {1}! Aborting!".format(childString, topNode))
return(None)
return(goodChildren)
def assetPublish(versionUp=True, *args):
"""
    checks whether the current scene is compatible; if it is not, bail out
ARGS:
versionUp (bool): whether to version up the work file on publish
"""
origScene = cmds.file(q=True, sn=True)
pp = uf.PathParser(origScene)
# bail if current scene is not compatible
if not pp.compatible:
cmds.warning("assetPublish.publish_maya_scene: You're not in a project compatible scene! Sorry. See a TD")
return()
# if it's not a stage file or a publish file and it's either modeling or rigging phase
if pp.assetType != "Stages" and pp.phase in ["Rigging", "Modeling"] and pp.stage=="Work":
mayapub = publish_maya_scene(versionUp, origScene)
if not mayapub: # i.e. we've failed somewhere in the func
return()
else:
print "===== skipping maya scene publish, since you're in {0} phase and {1} stage of the pipeline".format(pp.phase, pp.stage)
# if it's a rig work file
if pp.assetType != "Stages" and pp.phase in ["Rigging"] and pp.stage=="Work":
fbxPub = publish_fbx_rig_file(versionUp, origScene)
if not fbxPub:
return()
# if it's an anm work file
if pp.assetType != "Stages" and pp.phase in ["Animation"] and pp.stage=="Work":
pass
fbxPub = publish_fbx_anim_file(versionUp, origScene)
if not fbxPub:
return()
if versionUp:
verNum = int(pp.path[-7:-3])
pp.get_version_info()
nums = pp.versionNumbers
newNum = nums[-1]
verUpFile = "{0}{1}{2}".format(origScene[:-7], str(newNum).zfill(4),".mb")
print "assetPublish.assetPublish: Opening version up file:", verUpFile
if os.path.isfile(verUpFile):
cmds.file(verUpFile, open=True, force=True)
else:
cmds.file(pp.path, open=True, force=True)
else:
cmds.file(pp.path, open=True, force=True) | [
"[email protected]"
] | |
f3704e6ffdfc299ccbe48106a9f84b3607dfd38d | a0b7d64efae7804d82aac288f1ab36a98339c32a | /all_repos/manual.py | d8d1cad94614b5206dd88c6a14ebceb5fabfaa03 | [
"MIT"
] | permissive | asottile/all-repos | a4e71e2deabd4f82af8d8dda43289a22e9d22b28 | 54d272b276e391828b74efad9445f5eafc96c998 | refs/heads/main | 2023-09-01T14:39:35.708565 | 2023-08-29T15:13:51 | 2023-08-29T15:13:51 | 99,260,344 | 481 | 81 | MIT | 2023-09-05T11:38:16 | 2017-08-03T17:51:41 | Python | UTF-8 | Python | false | false | 1,302 | py | from __future__ import annotations
import argparse
from typing import Sequence
from all_repos import autofix_lib
from all_repos.config import Config
def find_repos(_: Config) -> list[str]:
raise AssertionError('--repos is required')
def main(argv: Sequence[str] | None = None) -> int:
parser = argparse.ArgumentParser(
description='Interactively apply a manual change across repos.',
usage='%(prog)s [options]',
)
autofix_lib.add_fixer_args(parser)
parser.add_argument(
'--branch-name', default='all-repos-manual',
help='override the autofixer branch name (default `%(default)s`).',
)
parser.add_argument(
'--commit-msg', '--commit-message', required=True,
help='set the autofixer commit message.',
)
args = parser.parse_args(argv)
# force interactive
args.interactive = True
repos, config, commit, autofix_settings = autofix_lib.from_cli(
args,
find_repos=find_repos,
msg=args.commit_msg,
branch_name=args.branch_name,
)
autofix_lib.fix(
repos,
apply_fix=autofix_lib.shell,
config=config,
commit=commit,
autofix_settings=autofix_settings,
)
return 0
if __name__ == '__main__':
raise SystemExit(main())
| [
"[email protected]"
] | |
ec2eb4274f2ab5c547553a43a9e89cf4b30551d7 | ad357cfbec64afb8f4cc4043b212996768f9755c | /api/barriers/serializers/mixins.py | 0bf4ea3db58f439746d9c561c10c87ca5eb2913b | [
"MIT"
] | permissive | uktrade/market-access-api | 6b4680e6455eb5c25480ccd3e3d9445654269f36 | 4da26d1be53843d22411577409d9489010bdda09 | refs/heads/master | 2023-08-30T14:47:10.373148 | 2023-08-29T13:58:08 | 2023-08-29T13:58:08 | 131,856,014 | 2 | 3 | MIT | 2023-09-14T08:04:42 | 2018-05-02T13:38:37 | Python | UTF-8 | Python | false | false | 3,729 | py | from rest_framework import serializers
class LocationFieldMixin(metaclass=serializers.SerializerMetaclass):
    # Use the metaclass so that fields declared on the mixin get registered on the serializer where the mixin is used
    # The field will still need to be listed on the serializer using this mixin
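    # Illustrative (hypothetical) usage -- the mixin contributes get_location(),
    # but "location" must still be listed on the serializer that uses it:
    #   class BarrierSerializer(LocationFieldMixin, serializers.ModelSerializer):
    #       class Meta:
    #           fields = ("location", ...)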
location = serializers.SerializerMethodField()
def get_location(self, obj):
try:
return obj.location or ""
except AttributeError:
return ""
class EconomicAssessmentRatingFieldMixin(metaclass=serializers.SerializerMetaclass):
economic_assessment_rating = serializers.SerializerMethodField()
def get_economic_assessment_rating(self, obj):
assessment = obj.current_economic_assessment
if assessment:
return assessment.get_rating_display()
class EconomicAssessmentExplanationFieldMixin(
metaclass=serializers.SerializerMetaclass
):
economic_assessment_explanation = serializers.SerializerMethodField()
def get_economic_assessment_explanation(self, obj):
assessment = obj.current_economic_assessment
if assessment:
return assessment.explanation
class ValueToEconomyFieldMixin(metaclass=serializers.SerializerMetaclass):
"""Value of UK exports of affected goods to partner country"""
value_to_economy = serializers.SerializerMethodField()
def get_value_to_economy(self, obj):
assessment = obj.current_economic_assessment
if assessment:
return assessment.export_potential.get("uk_exports_affected")
class ImportMarketSizeFieldMixin(metaclass=serializers.SerializerMetaclass):
import_market_size = serializers.SerializerMethodField()
def get_import_market_size(self, obj):
"""Size of import market for affected product(s)"""
assessment = obj.current_economic_assessment
if assessment:
return assessment.export_potential.get("import_market_size")
class ValuationAssessmentRatingFieldMixin(metaclass=serializers.SerializerMetaclass):
valuation_assessment_rating = serializers.SerializerMethodField()
def get_valuation_assessment_rating(self, obj):
latest_valuation_assessment = obj.current_valuation_assessment
if latest_valuation_assessment:
return latest_valuation_assessment.rating
class ValuationAssessmentMidpointFieldMixin(metaclass=serializers.SerializerMetaclass):
valuation_assessment_midpoint = serializers.SerializerMethodField()
valuation_assessment_midpoint_value = serializers.SerializerMethodField()
def get_valuation_assessment_midpoint(self, obj):
latest_valuation_assessment = obj.current_valuation_assessment
if latest_valuation_assessment:
return latest_valuation_assessment.midpoint
def get_valuation_assessment_midpoint_value(self, obj):
latest_valuation_assessment = obj.current_valuation_assessment
if latest_valuation_assessment:
return latest_valuation_assessment.midpoint_value
class ValuationAssessmentExplanationFieldMixin(
metaclass=serializers.SerializerMetaclass
):
valuation_assessment_explanation = serializers.SerializerMethodField()
def get_valuation_assessment_explanation(self, obj):
latest_valuation_assessment = obj.current_valuation_assessment
if latest_valuation_assessment:
return latest_valuation_assessment.explanation
class AssessmentFieldsMixin(
EconomicAssessmentRatingFieldMixin,
EconomicAssessmentExplanationFieldMixin,
ValueToEconomyFieldMixin,
ImportMarketSizeFieldMixin,
ValuationAssessmentRatingFieldMixin,
ValuationAssessmentMidpointFieldMixin,
ValuationAssessmentExplanationFieldMixin,
):
pass
| [
"[email protected]"
] | |
41eaf7e6aec7c41d1fd7d58171b45b152b251f0c | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/nncunningli.py | c8167f667286f6af6e6235660cde84e24409626d | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 159 | py | ii = [('BailJD2.py', 1), ('CarlTFR.py', 12), ('LyttELD.py', 1), ('WadeJEB.py', 1), ('GodwWLN.py', 1), ('SoutRD.py', 1), ('FitzRNS2.py', 1), ('BowrJMM3.py', 1)] | [
"[email protected]"
] | |
1c9687591a895a84bf4597f263816a8942879e51 | ce76b3ef70b885d7c354b6ddb8447d111548e0f1 | /feel_first_hand/work_different_thing_of_next_company.py | 1f68da0b0954abd51549c98382a5e50aac67ba34 | [] | no_license | JingkaiTang/github-play | 9bdca4115eee94a7b5e4ae9d3d6052514729ff21 | 51b550425a91a97480714fe9bc63cb5112f6f729 | refs/heads/master | 2021-01-20T20:18:21.249162 | 2016-08-19T07:20:12 | 2016-08-19T07:20:12 | 60,834,519 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 262 | py |
#! /usr/bin/env python
def place_or_life(str_arg):
right_point_or_important_work(str_arg)
print('case')
def right_point_or_important_work(str_arg):
print(str_arg)
if __name__ == '__main__':
place_or_life('large_government_and_different_man')
| [
"[email protected]"
] | |
f5b7ecf5498c79c23d17b6a98ceb9627c189511f | 2612f336d667a087823234daf946f09b40d8ca3d | /python/lib/Lib/site-packages/django/core/management/commands/diffsettings.py | 98b53b405d24da9b10a77e7b0bbc578bc9603626 | [
"Apache-2.0"
] | permissive | tnorbye/intellij-community | df7f181861fc5c551c02c73df3b00b70ab2dd589 | f01cf262fc196bf4dbb99e20cd937dee3705a7b6 | refs/heads/master | 2021-04-06T06:57:57.974599 | 2018-03-13T17:37:00 | 2018-03-13T17:37:00 | 125,079,130 | 2 | 0 | Apache-2.0 | 2018-03-13T16:09:41 | 2018-03-13T16:09:41 | null | UTF-8 | Python | false | false | 1,296 | py | from django.core.management.base import NoArgsCommand
def module_to_dict(module, omittable=lambda k: k.startswith('_')):
"Converts a module namespace to a Python dictionary. Used by get_settings_diff."
return dict([(k, repr(v)) for k, v in module.__dict__.items() if not omittable(k)])
class Command(NoArgsCommand):
help = """Displays differences between the current settings.py and Django's
default settings. Settings that don't appear in the defaults are
followed by "###"."""
requires_model_validation = False
def handle_noargs(self, **options):
# Inspired by Postfix's "postconf -n".
from django.conf import settings, global_settings
# Because settings are imported lazily, we need to explicitly load them.
settings._setup()
user_settings = module_to_dict(settings._wrapped)
default_settings = module_to_dict(global_settings)
output = []
keys = user_settings.keys()
keys.sort()
for key in keys:
if key not in default_settings:
output.append("%s = %s ###" % (key, user_settings[key]))
elif user_settings[key] != default_settings[key]:
output.append("%s = %s" % (key, user_settings[key]))
return '\n'.join(output)
| [
"[email protected]"
] | |
299979bbe9b2eb8b65b2921f4bd2f9e42a541143 | a66f75ac69e3494099886b14aa8c2abad11be89c | /partition_for_transaction.py | e999312058657c109630b1521a90f401ca395b3e | [
"MIT"
] | permissive | ryokugyu/1M_Generalization | fb54b30d5a870f8d8a8c81e96d260cc66aa4f781 | 679ca565e7390af2f2b2d7d85400b339a32a782d | refs/heads/master | 2020-09-29T18:55:56.529124 | 2016-11-18T07:16:05 | 2016-11-18T07:16:05 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,522 | py | """
main module of partition
"""
#!/usr/bin/env python
# coding=utf-8
import pdb
from models.bucket import Bucket
from itertools import combinations
from utils.utility import list_to_str
_DEBUG = False
PARENT_LIST = {}
ATT_TREES = {}
LEAF_NUM = 0
ELEMENT_NUM = 0
RESULT = []
DATA = []
# comparison function for sorting tree nodes
def node_cmp(node1, node2):
"""compare node1(str) and node2(str)
    Compare two nodes according to their support
"""
support1 = len(ATT_TREES[node1])
support2 = len(ATT_TREES[node2])
if support1 != support2:
return cmp(support1, support2)
else:
return cmp(node1, node2)
def information_gain(bucket, pick_value=''):
"""get information gain from bucket accroding to pick_value
"""
ig = 0.0
parent_value = bucket.value
cover_number = 0
    # Herein, all ncp will be divided by the same denominator.
    # So I don't compute the true ncp, only use the numerator part.
if pick_value == '':
# compute bucket's information gain
for gen_value in bucket.value:
if len(ATT_TREES[gen_value]) == 0:
continue
for temp in bucket.member_index:
ig = ig + trans_information_gain(DATA[temp], gen_value)
else:
# pick node's information gain
if len(ATT_TREES[pick_value]) == 0:
return 0
for temp in bucket.member_index:
ig = ig + trans_information_gain(DATA[temp], pick_value)
return ig
def trans_information_gain(tran, pick_value):
"""get information gain for trans accroding to pick_value
"""
ig = 0.0
ncp = len(ATT_TREES[pick_value])
for t in tran:
if pick_value in PARENT_LIST[t]:
ig += ncp
return ig
def pick_node(bucket):
"""find the split node with largest information gain.
    Then split bucket into sub-buckets according to this node.
"""
buckets = {}
result_list = []
max_ig = -10000
max_value = ''
check_list = [t for t in bucket.value if t not in bucket.split_list]
for t in check_list:
if len(ATT_TREES[t].child) != 0:
ig = information_gain(bucket, t)
if ig > max_ig:
max_ig = ig
max_value = t
# begin to expand node on pick_value
if max_value == '':
print "Error: list empty!!"
return ('', {})
# get index of max_value
index = bucket.value.index(max_value)
child_value = [t.value for t in ATT_TREES[max_value].child]
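    # every non-empty subset of the children is enumerated; each subset becomes
    # a candidate generalized value for one child bucket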
for i in range(1, len(child_value) + 1):
temp = combinations(child_value, i)
temp = [list(t) for t in temp]
result_list.extend(temp)
# generate child buckets
child_level = bucket.level[:]
child_value = bucket.value[:]
now_level = bucket.level[index] + 1
del child_level[index]
del child_value[index]
for temp in result_list:
temp_level = child_level[:]
temp_value = child_value[:]
for t in temp:
temp_level.insert(index, now_level)
temp_value.insert(index, t)
str_value = list_to_str(temp)
buckets[str_value] = Bucket([], temp_value, temp_level)
bucket.split_list.append(max_value)
return (max_value, buckets)
def distribute_data(bucket, buckets, pick_value):
"""distribute records from parent_bucket to buckets (splited buckets)
accroding to records elements.
"""
if len(buckets) == 0:
print "Error: buckets is empty!"
return
data_index = bucket.member_index[:]
for temp in data_index:
gen_list = []
for t in DATA[temp]:
treelist = PARENT_LIST[t]
try:
pos = treelist.index(pick_value)
# if covered, then replaced with new value
if pos > 0:
gen_list.append(treelist[pos - 1])
else:
print "Error: pick node is leaf, which cannot be splited"
except:
continue
gen_list = list(set(gen_list))
# sort to ensure the order
str_value = list_to_str(gen_list)
try:
buckets[str_value].member_index.append(temp)
except:
pdb.set_trace()
print "Error: Cannot find key."
def balance_partitions(parent_bucket, buckets, K, pick_value):
"""handel buckets with less than K records
"""
global RESULT
left_over = []
for k, t in buckets.items():
if len(t.member_index) < K:
            # add records of buckets with less than K elements
# to left_over partition
left_over.extend(t.member_index[:])
del buckets[k]
if len(left_over) == 0:
# left over bucket is empty, skip balance step
return
    # re-distribute transactions with the least information gain from
    # buckets over K to left_over, to ensure the number of
    # records in left_over is larger than K
# using flag to denote if re-distribute is successful or not
flag = True
while len(left_over) < K:
        # each iteration picks the transaction with the least information gain from buckets over K
check_list = [t for t in buckets.values() if len(t.member_index) > K]
if len(check_list) == 0:
flag = False
break
min_ig = 10000000000000000
min_key = (0, 0)
for i, temp in enumerate(check_list):
for j, t in enumerate(temp.member_index):
ig = trans_information_gain(DATA[t], pick_value)
if ig < min_ig:
min_ig = ig
min_key = (i, j)
left_over.append(check_list[min_key[0]].member_index[min_key[1]])
del check_list[min_key[0]].member_index[min_key[1]]
if flag is not True:
        # Note: if flag == False, it means the split was unsuccessful.
        # So we need to pop a bucket from buckets to merge with left_over.
        # The bucket popped is larger than K, so left_over will end up larger than K.
parent_bucket.splitable = False
try:
min_ig = 10000000000000000
min_key = ''
for k, t in buckets.items():
ig = information_gain(t, pick_value)
if ig < min_ig:
min_ig = ig
min_key = k
left_over.extend(buckets[min_key].member_index[:])
del buckets[min_key]
except:
print "Error: buckets is empty"
pdb.set_trace()
parent_bucket.member_index = left_over[:]
str_value = list_to_str(parent_bucket.value)
buckets[str_value] = parent_bucket
def check_splitable(bucket, K):
"""check if bucket can further drill down
"""
check_list = [t for t in bucket.value if t not in bucket.split_list]
if bucket.splitable:
for t in check_list:
if len(ATT_TREES[t].child) != 0:
return True
bucket.splitable = False
return False
def anonymize(bucket, K):
"""recursively split dataset to create anonymization buckets
"""
global RESULT
if check_splitable(bucket, K) is not True:
RESULT.append(bucket)
return
(pick_value, expandNode) = pick_node(bucket)
distribute_data(bucket, expandNode, pick_value)
balance_partitions(bucket, expandNode, K, pick_value)
for t in expandNode.values():
anonymize(t, K)
def iloss(tran, middle):
"""return iloss caused by anon tran to middle
"""
iloss = 0.0
for t in tran:
ntemp = ATT_TREES[t]
checktemp = ntemp.parent[:]
checktemp.insert(0, ntemp)
for ptemp in checktemp:
if ptemp.value in middle:
break
else:
print "Program Error!!!! t=%s middle=%s" % (t, middle)
pdb.set_trace()
if ptemp.value == t:
continue
iloss = iloss + len(ptemp)
    # only one attribute is involved, so we can simplify NCP
iloss = iloss * 1.0 / LEAF_NUM
return iloss
def setalliloss(buckets):
"""return iloss sum of buckets, recompute iloss foreach bucket
"""
alliloss = 0.0
for gtemp in buckets:
gloss = 0.0
for mtemp in gtemp.member_index:
gloss = gloss + iloss(DATA[mtemp], gtemp.value)
gtemp.iloss = gloss
alliloss += gloss
alliloss = alliloss * 1.0 / ELEMENT_NUM
return alliloss
def init(att_tree, data, K):
global LEAF_NUM, PARENT_LIST, ATT_TREES, ELEMENT_NUM, DATA, RESULT
RESULT = []
PARENT_LIST = {}
ELEMENT_NUM = 0
LEAF_NUM = 0
DATA = data[:]
for t in DATA:
ELEMENT_NUM += len(t)
ATT_TREES = att_tree
LEAF_NUM = len(ATT_TREES['*'])
for k, v in ATT_TREES.iteritems():
if len(v) == 0:
PARENT_LIST[k] = [t.value for t in v.parent]
PARENT_LIST[k].insert(0, k)
def partition(att_tree, data, K):
"""partition tran part of microdata
"""
init(att_tree, data, K)
result = []
if _DEBUG:
print '-' * 30
print "K=%d" % K
print "Begin Partition!"
anonymize(Bucket(range(len(DATA)), ['*'], [0]), K)
# print "Publishing Result Data..."
# changed to percentage
all_loss = 100.0 * setalliloss(RESULT)
if _DEBUG:
# print [len(t.member_index) for t in RESULT]
print "Number of buckets %d" % len(RESULT)
print "iloss = %0.2f" % all_loss + "%"
# transform result
result = [(t.member_index[:], t.value) for t in RESULT]
return (result, all_loss)
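# Illustrative usage (names are hypothetical; the attribute tree and transactions
# are built by the caller, e.g. from read_tree()/read_data() helpers):
#   att_tree = read_tree()
#   trans = [['a1', 'b2'], ['a1', 'c3']]
#   buckets, total_loss = partition(att_tree, trans, K=10)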
| [
"[email protected]"
] | |
917995ac73a5bf09f647a885d5c631b5e4545361 | 5f92dd1e312a22c84eb826035b859b5fbffb7f4d | /FoTStreamServer/conceptdriftOld/algorithms/cusum.py | 05f8ff161141730c3fdcd0d7a37383d8942f8561 | [] | no_license | BrennoMello/FoT-Simulation-SDN-ML | a5a967d31c8df99d2f77978efa36eb8ed8d09928 | bfaa7439db3d3b4b2470e342c5fdbe41106ed700 | refs/heads/master | 2023-05-08T10:01:01.099838 | 2021-06-03T02:33:52 | 2021-06-03T02:33:52 | 351,583,489 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,947 | py | """
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
---
*** The Cumulative Sum (CUSUM) Method Implementation ***
Paper: Page, Ewan S. "Continuous inspection schemes."
Published in: Biometrika 41.1/2 (1954): 100-115.
URL: http://www.jstor.org/stable/2333009
"""
from .detector import SuperDetector
class CUSUM(SuperDetector):
"""The Cumulative Sum (CUSUM) drift detection method class."""
def __init__(self, min_instance=30, delta=0.005, lambda_=50):
super().__init__()
self.MINIMUM_NUM_INSTANCES = min_instance
self.m_n = 1
self.x_mean = 0
self.sum = 0
self.delta = delta
self.lambda_ = lambda_
def run(self, pr):
# pr = 1 if pr is False else 0
warning_status = False
drift_status = False
# 1. UPDATING STATS
self.x_mean = self.x_mean + (pr - self.x_mean) / self.m_n
self.sum = self.sum + pr - self.x_mean - self.delta
self.m_n += 1
# 2. UPDATING WARNING AND DRIFT STATUSES
if self.m_n >= self.MINIMUM_NUM_INSTANCES:
if abs(self.sum) > self.lambda_:
drift_status = True
#return warning_status, drift_status
return drift_status
def reset(self):
super().reset()
self.m_n = 1
self.x_mean = 0
self.sum = 0
def get_settings(self):
return [
str(self.MINIMUM_NUM_INSTANCES)
+ "."
+ str(self.delta)
+ "."
+ str(self.lambda_),
"$n_{min}$:"
+ str(self.MINIMUM_NUM_INSTANCES)
+ ", "
+ "$\delta$:"
+ str(self.delta).upper()
+ ", "
+ "$\lambda$:"
+ str(self.lambda_).upper(),
]
| [
"None"
] | None |
0a3c8dbe5801337e199918aea0ad1750be687380 | 224459455cda5cc97511c200ee6200038e2d415b | /meshdynamic/meshDynamic-Density.py | 51cda0fd7015ab6166cc279518c001e6dcd14ae9 | [
"MIT"
] | permissive | deepkashiwa20/DeepUrbanEvent | d252a659a875030c300f1ce10127dc3158e0cd97 | 69292578d2a2d5d47ae50f0ea2f3810f5199a1c5 | refs/heads/main | 2023-07-18T09:58:26.442342 | 2021-08-24T02:43:13 | 2021-08-24T02:43:13 | 383,077,914 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25,307 | py | import csv
import numpy as np
import os
import sys
import time
import jismesh.utils as ju
import pandas as pd
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(rootPath)
from common.datastructure.Point import Point
from common.datastructure.Mesh import Mesh
# meshTokyo = Mesh('tokyo','500m')
# GRIDNUMBER = meshTokyo.lonNum * meshTokyo.latNum
# print(meshTokyo.size, GRIDNUMBER)
# InterpolatedStep = 12
def getTimestamps(fileName):
last_tid = ''
D = []
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
if last_tid != '' and last_tid != tid:
break
timestamp = line[1]
D.append(timestamp)
last_tid = tid
return D
def getMesh(mesh, readFileName, writeFileName):
cnt = 0
wf = open(writeFileName, 'w')
with open(readFileName, 'r') as rf:
for line in csv.reader(rf):
if cnt % 1000000 == 0:
print(cnt)
tid = line[0]
timestamp = line[1]
p = Point(float(line[2]), float(line[3]))
meshid = mesh.inWhichGrid(p)
wf.write(','.join([tid, timestamp, str(meshid)])+'\n')
cnt += 1
wf.close()
def genMeshDynamic(mesh, fileName, meshFileName):
MD = {}
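    # MD maps (timestamp, meshid) -> set of trajectory ids seen in that cell,
    # so len(MD[key]) below gives the crowd density for that cell and time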
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
timestamp = line[1]
meshid = line[2]
key = (timestamp, meshid)
if key in MD:
MD[key].add(tid)
else:
                MD[key] = set([tid])  # one-element set holding this tid
wf = open(meshFileName, 'w')
Timestamps = getTimestamps(fileName)
for ts in Timestamps:
for meshid in range(mesh.lonNum * mesh.latNum):
key = (ts, str(meshid))
if key in MD:
value = len(MD[key])
else:
value = 0
wf.write(','.join([key[0], key[1], str(value)]) + '\n')
wf.close()
def getGrids(fileName):
last_tid = ''
G = []
with open(fileName, "r") as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
if last_tid != '' and last_tid != tid:
break
grid = line[1]
G.append(grid)
last_tid = tid
return G
def getDynamicMesh_mobmap(trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getDynamicMesh Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append({})
with open(trajFileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
# tid = line[0]
timestamp = line[1]
lon = float(line[2])
lat = float(line[3])
meshcode = ju.to_meshcode(lat, lon, meshcode_level)
if meshcode in R[TS[timestamp]]:
R[TS[timestamp]][meshcode] += 1
else:
R[TS[timestamp]][meshcode] = 1
print('getDynamicMesh Count Ended : ', time.ctime())
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def getDynamicMeshMobmap(trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getDynamicMesh Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append({})
with open(trajFileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
# tid = line[0]
timestamp = line[1]
lon = float(line[2])
lat = float(line[3])
meshcode = ju.to_meshcode(lat, lon, meshcode_level)
if meshcode in R[TS[timestamp]]:
R[TS[timestamp]][meshcode] += 1
else:
R[TS[timestamp]][meshcode] = 1
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def getRfromDynamicMeshMobmap(meshcode_level, dynamicFileName, dynamicFileName1, dynamicFileName2):
df1 = pd.read_csv(dynamicFileName, header=None, skiprows=2)
df1.iloc[:,2] = np.log10(df1.iloc[:,2]+1) * 100
df2 = pd.read_csv(dynamicFileName, header=None, skiprows=2)
df2.iloc[:, 2] = np.log(df2.iloc[:,2]+1) * 100
with open(dynamicFileName1, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level) + '\n')
with open(dynamicFileName2, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level) + '\n')
df1.to_csv(dynamicFileName1, header=False, index=False, mode='a')
df2.to_csv(dynamicFileName2, header=False, index=False, mode='a')
def getDynamicMeshMobmapR(R, trajFileName, dynamicFileName, meshcode_level):
Timestamps = getTimestamps(trajFileName)
print('getDynamicMesh Count Ended : ', time.ctime())
with open(dynamicFileName, 'w') as wf:
wf.write("@dynamic-mesh\n")
wf.write("@use-mesh-code," + str(meshcode_level))
for i in range(len(R)):
timestamp = Timestamps[i]
for key in R[i]:
meshcode = key
meshpop = R[i][meshcode]
wf.write(','.join([timestamp, meshcode, str(meshpop)]) + '\n')
print('getDynamicMesh Ended : ', time.ctime())
def genMeshDynamicTimeInterval(fileName, meshFileName, startTimestamp, endTimestamp):
Timestamps = getTimestamps(fileName)
startIndex = Timestamps.index(startTimestamp)
endIndex = Timestamps.index(endTimestamp)
Interval = [Timestamps[t] for t in range(startIndex, endIndex)]
def strHH(timestamp):
return timestamp[11:13] + timestamp[14:16]
wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if line[0] in Interval:
wf.write(','.join(line) + '\n')
else:
pass
wf.close()
def genMeshDynamicTimeInterval_Mobmap(fileName, meshFileName, startTimestamp, endTimestamp):
Timestamps = getTimestamps(fileName)
startIndex = Timestamps.index(startTimestamp)
endIndex = Timestamps.index(endTimestamp)
Interval = [Timestamps[t] for t in range(startIndex, endIndex)]
def strHH(timestamp):
return timestamp[11:13] + timestamp[14:16]
wf = open(meshFileName[:-4] + '_' + strHH(startTimestamp) + '_' + strHH(endTimestamp) + '.csv', 'w')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if line[0] == '@dynamic-mesh' or '"@use-mesh-code':
wf.write(line + '\n')
if line[0] in Interval:
wf.write(','.join(line) + '\n')
else:
pass
wf.close()
def genMeshDynamicMobmap(mesh, meshFileName, mobmapFile, timestamp):
wf = open(mobmapFile, 'w')
wf.write('@static-mesh' + '\n')
wf.write(','.join([str(x) for x in
[mesh.minLat, mesh.minLon, mesh.dLat, mesh.dLon]]) + '\n')
with open(meshFileName, 'r') as rf:
for line in csv.reader(rf):
if timestamp != line[0]:
continue
else:
meshid = line[1]
number = line[2]
xi, yi = mesh.Index[int(meshid)]
wf.write(','.join([str(item) for item in [yi, xi, number]]) + '\n')
wf.close()
def loadGTrajectory(fileName):
print('loadTrajectory Started : ', time.ctime())
TDB = {}
with open(fileName, 'r') as rf:
reader = csv.reader(rf)
for line in reader:
tid = line[0]
# timestamp = line[1]
meshid = line[2]
if tid in TDB:
TDB[tid].append(meshid)
else:
TDB[tid] = [meshid]
print('loadTrajectory Ended : ', time.ctime())
return TDB
def getINDEX(mesh, gTrajFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('getTrajectoryINDEX Started : ', time.ctime())
Timestamps = getTimestamps(gTrajFileName)
print('timestamps...', len(Timestamps))
TDB = loadGTrajectory(gTrajFileName)
INDEX = []
for i in range(len(Timestamps)):
INDEX.append([])
for G in range(GRIDNUMBER):
INDEX[i].append(set()) # set().add
# print(np.array(INDEX).shape)
for tid in TDB:
traj = TDB[tid]
for i in range(len(traj)):
HH = i
if traj[i] == 'None':
pass
else:
gid = int(traj[i])
INDEX[HH][gid].add(tid) # set().add
return INDEX
def getGridImageIndex(mesh, window=15):
GRIDNUMBER = mesh.lonNum * mesh.latNum
IMG = []
for g in range(GRIDNUMBER):
R = np.zeros((window, window), dtype='int32')
current_x, current_y = mesh.Index[g]
start = 0 - window // 2
end = window + start
for i, dx in enumerate(list(range(start, end))):
for j, dy in enumerate(list(range(start, end))):
x = current_x + dx
y = current_y + dy
if mesh.inMesh(x, y):
grid = mesh.ReverseIndex[(x, y)]
R[j][i] = grid
else:
R[j][i] = -1
R = R[::-1, :]
IMG.append(R)
return IMG
def genGridTransit(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('genGridTransit Started : ', time.ctime())
transitWriteFile = open(transitFileName, 'w')
INDEX = getINDEX(mesh, gTrajFileName)
Timestamps = getTimestamps(gTrajFileName)
GridImageIndex = getGridImageIndex(mesh)
print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
for i in range(len(Timestamps) - 1):
for j in range(GRIDNUMBER):
cur_time = i
next_time = i + 1
cur_grid = j
transitgrids = GridImageIndex[cur_grid]
Transit = np.zeros(transitgrids.shape, dtype='int32')
for ii in range(transitgrids.shape[0]):
for jj in range(transitgrids.shape[1]):
next_grid = transitgrids[ii][jj]
if next_grid != -1:
trajfirst = INDEX[cur_time][cur_grid]
trajsecond = INDEX[next_time][next_grid]
transit_num = len(trajfirst & trajsecond)
Transit[ii][jj] = transit_num
else:
pass
FlattedTransit = Transit.reshape(-1).tolist()
lineitem = [str(i), str(j)]
lineitem.extend([str(t) for t in FlattedTransit])
line = ','.join(lineitem) + '\n'
transitWriteFile.write(line)
print('genGridTransit timestamp: ', i)
transitWriteFile.close()
print('genGridTransit Ended: ', time.ctime())
# This grid transit version is for 1minutes trajectory, more accurate, not for 5minutes.
# !!!!!!!!!!!!!!!!!!!! 1 minute trajectory data.
# TT is supposed to be 288 not 289 because it is interval.
def genGridTransit_5minutes_from_1minute(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
print('genGridTransit Started : ', time.ctime())
transitWriteFile = open(transitFileName, 'w')
INDEX = getINDEX(mesh, gTrajFileName)
# Timestamps = getTimestamps(gTrajFileName)
GridImageIndex = getGridImageIndex(mesh)
print('INDEX, Timestamps, GridImageIndex have been prepared.', time.ctime())
TT, SPAN = 24 * 12, 5
for i in range(TT):
for j in range(GRIDNUMBER):
cur_time = i
cur_grid = j
transitgrids = GridImageIndex[cur_grid]
Transit = np.zeros(transitgrids.shape, dtype='int32')
for ii in range(transitgrids.shape[0]):
for jj in range(transitgrids.shape[1]):
next_grid = transitgrids[ii][jj]
if next_grid != -1:
cur_time_start = cur_time * SPAN
cur_time_end = (cur_time + 1) * SPAN + 1
SS = set()
for pp in range(cur_time_start, cur_time_end):
trajfirst = INDEX[pp][cur_grid]
for qq in range(pp, cur_time_end):
trajsecond = INDEX[qq][next_grid]
SS.update(trajfirst & trajsecond)
transit_num = len(SS)
Transit[ii][jj] = transit_num
else:
pass
FlattedTransit = Transit.reshape(-1).tolist()
lineitem = [str(i), str(j)]
lineitem.extend([str(t) for t in FlattedTransit])
line = ','.join(lineitem) + '\n'
transitWriteFile.write(line)
print('genGridTransit timestamp: ', i)
transitWriteFile.close()
print('genGridTransit Ended: ', time.ctime())
def getGridTransit(mesh, gTrajFileName, transitFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps) - 1 # -1 is because of transit
print('getGridTransit Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(transitFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = int(line[0])
grid = int(line[1])
R[timestamp][grid] = line[2:]
R = np.array(R, dtype='int32') # 144, 6000, 225
R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # 144, 75, 80, 225
return R
def getGridPop(mesh, gTrajFileName, popFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getGridPop Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # shape 145, 80, 80, 1
return R
def getGridPopPartition(R, M, K):
# Original 8*8 matrix N = 8 = M*K
# M = 4 # M*M sub matrix
# K = 2 # each sub matrix has the size of K * K
P = []
for i in range(M):
for j in range(M):
P.append(R[:, i*K:i*K+K, j*K:j*K+K, :])
return np.array(P)
def getGridPop2DNumpy(mesh, gTrajFileName, popFileName):
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = getTimestamps(gTrajFileName)
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
print('getGridPop Started : ', time.ctime())
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
return R
def getGridPopTimeInterval(mesh, popFileName):
print('getGridPop', popFileName, time.ctime())
GRIDNUMBER = mesh.lonNum * mesh.latNum
Timestamps = []
lastTimestamp = ''
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = line[0]
if timestamp != lastTimestamp:
Timestamps.append(timestamp)
lastTimestamp = timestamp
TIMENUMBER = len(Timestamps)
TS = {}
for i in range(TIMENUMBER):
TS[Timestamps[i]] = i
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(popFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = TS[line[0]]
grid = int(line[1])
R[timestamp][grid] = int(line[2])
R = np.array(R, dtype='int32') # shape 145, 6000
R = R.reshape(R.shape[0], int(R.shape[1] ** 0.5), int(R.shape[1] ** 0.5), 1)
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # shape 145, 75, 80, 1
return R
def getGridTransitTimeInterval(mesh, transitFileName):
print('getGridTransit Started : ', transitFileName, time.ctime())
GRIDNUMBER = mesh.lonNum * mesh.latNum
# Timestamps = []
# lastTimestamp = ''
# with open(transitFileName, 'r') as rf:
# tansistReader = csv.reader(rf)
# for line in tansistReader:
# timestamp = line[0]
# if timestamp != lastTimestamp:
# Timestamps.append(timestamp)
# lastTimestamp = timestamp
# TIMENUMBER = len(Timestamps)
TIMENUMBER = 24 * 12
R = []
for i in range(TIMENUMBER):
R.append([])
for j in range(GRIDNUMBER):
R[i].append([])
with open(transitFileName, 'r') as rf:
tansistReader = csv.reader(rf)
for line in tansistReader:
timestamp = int(line[0])
grid = int(line[1])
R[timestamp][grid] = line[2:]
R = np.array(R, dtype='int32') # 144, 6000, 225
R = R.reshape(R.shape[0], mesh.lonNum, mesh.latNum, R.shape[2])
R = np.swapaxes(R, 2, 1)
R = R[:, ::-1, :, :] # 144, 75, 80, 225
return R
def shuffleTrainValidateTest(InterpolatedStep, path, fileName, R, testRate=0.2):
TIMESTEP = InterpolatedStep * 2
Sequence = []
for i in range(R.shape[0] - TIMESTEP):
Sequence.append(R[i:i+TIMESTEP, :, :, :])
Sequence = np.array(Sequence, dtype='int32')
INDEX = list(range(len(Sequence)))
np.random.shuffle(INDEX)
np.random.shuffle(INDEX)
np.random.shuffle(INDEX)
trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate))]
testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]
train = Sequence[trainINDEX]
test = Sequence[testINDEX]
np.save(path + 'train_' + fileName, train)
np.save(path + 'test_' + fileName, test)
print(train.shape, test.shape)
# trainINDEX = INDEX[:int(len(INDEX) * (1 - testRate - validateRate))]
# validateINDEX = INDEX[int(len(INDEX) * (1 - testRate - validateRate)):int(len(INDEX) * (1 - testRate))]
# testINDEX = INDEX[int(len(INDEX) * (1 - testRate)):]
# train = Sequence[trainINDEX]
# validate = Sequence[validateINDEX]
# test = Sequence[testINDEX]
# np.save(path + 'train_' + fileName, train)
# np.save(path + 'validate_' + fileName, validate)
# np.save(path + 'test_' + fileName, test)
# print(train.shape, validate.shape, test.shape)
# or directly return not save to file because just too big.
# return train, validate, test
def getShuffledTrainTest(path, fileName, TrainTest):
return np.load(path + TrainTest + '_' + fileName + '.npy')
def testcode(mesh):
GRIDNUMBER = mesh.lonNum * mesh.latNum
window = 5
R = np.zeros((window, window), dtype='int32')
center = mesh.ReverseIndex[(2,2)]
current_x, current_y = mesh.Index[center]
start = 0 - window // 2
end = window + start
for i, dx in enumerate(list(range(start, end))):
for j, dy in enumerate(list(range(start, end))):
x = current_x + dx
y = current_y + dy
if mesh.inMesh(x, y):
grid = mesh.ReverseIndex[(x, y)]
R[j][i] = grid
else:
R[j][i] = -1
R = R[::-1, :]
print(R)
for i in range(len(R)):
print(R[i])
for i in range(len(R)):
print(R[i][0], R[i][1], R[i][2], R[i][3], R[i][4])
T = R.reshape(-1)
print(T.tolist())
P = T.reshape(window, window)
print(P)
print(R.shape)
print(R[54][4178])
print(np.max(R) == 3369)
print(mesh.Index[3369])
x, y = mesh.Index[3369]
lon, lat = mesh.minLon + (x + 0.5) * mesh.dLon, \
mesh.minLat + (y + 0.5) * mesh.dLat
print(lon, lat)
print(mesh.lonNum, mesh.latNum)
T = np.array(range(GRIDNUMBER))
T = T.reshape(mesh.lonNum, mesh.latNum)
T = np.swapaxes(T, 1, 0)
T = T[::-1, :]
print(T)
print(T.shape)
def run5min201802(mesh, dataPATH, dates):
print('Now is getting trainig XS and YS...', dates)
# timestamp = '2011-10-20 09:00:00'
# filenameTime = timestamp[0:4] + timestamp[5:7] + timestamp[8:10] \
# + timestamp[11:13] + timestamp[14:16] + timestamp[17:19]
# print(filenameTime)
for date in dates:
# first step: from trajectory point to mesh
getMesh(dataPATH + date + 'tokyo_interpo5min.csv',
dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv')
# second step: calculate mesh population at each timestamp
genMeshDynamic(dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
dataPATH + date + 'tokyo_' + mesh.size + '_5min_pop.csv')
# fourth step: mesh transit between two consecutive timestamps
genGridTransit(dataPATH + date + 'tokyo_' + mesh.size + '_5min.csv',
dataPATH + date + 'tokyo_' + mesh.size + '_5min_transit.csv')
def getHHTransit(HH):
assert HH <= 22, 'Hour should not be over 22.'
dataPATH = '../interpo_data/'
date = '20111020'
R = getGridTransit(dataPATH + date + 'tokyo_meshtransit10min_1min_15.csv')
# (144, 72, 80, 225)
R = R[HH*6:HH*6+6, :, :, :]
# (6, 72, 80, 225)
R = R.reshape(R.shape[0], -1, R.shape[-1])
# (6, 5760, 225)
R = R.transpose(1, 0, 2)
# (5760, 6, 225)
R = R.reshape(R.shape[0], R.shape[1], int(R.shape[2]**0.5), int(R.shape[2]**0.5), 1)
return R
def runCrowdDensity():
dataPATH = '../interpo_data/'
meshTokyo = Mesh('tokyo', '500m')
#meshcode_level = 4
alldates = ["20110217","20110218","20110219","20110220", "20110221",
"20110222","20110223", "20110224", "20110225", "20110226", "20110227"]
for date in alldates:
print('this is date', date)
getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo5min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv')
genMeshDynamic(meshTokyo, dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_pop.csv')
# def runCrowdFlow_from5min():
# from common.dataparam.Param import alldates
# dataPATH = '../interpo_data/'
# meshTokyo = Mesh('tokyo', '500m')
# #meshcode_level = 4
#
# for date in alldates:
# print('this is date', date)
# genGridTransit(meshTokyo,
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min.csv',
# dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit_from5min.csv')
# paper crowd flow is from 1min.!!!!!!!!!!!!
def runCrowdFlow():
dataPATH = '../interpo_data/'
meshTokyo = Mesh('tokyo', '500m')
#meshcode_level = 4
alldates = ["20110217", "20110218", "20110219", "20110220", "20110221",
"20110222", "20110223", "20110224", "20110225", "20110226", "20110227"]
for date in alldates:
print('this is date', date)
getMesh(meshTokyo, dataPATH + date + 'tokyo_interpo1min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv')
genGridTransit_5minutes_from_1minute(meshTokyo,
dataPATH + date + 'tokyo_' + meshTokyo.size + '_1min.csv',
dataPATH + date + 'tokyo_' + meshTokyo.size + '_5min_transit.csv')
def main():
runCrowdDensity()
if __name__ == '__main__':
main() | [
"[email protected]"
] | |
8ed210de7b773b2d858db28b0e98b28b8888fd8a | b2d3bd39b2de8bcc3b0f05f4800c2fabf83d3c6a | /examples/pwr_run/checkpointing/final/predict_error/job66.py | 764442a07ebe3d86ebd0814af1b025e37c398848 | [
"MIT"
] | permissive | boringlee24/keras_old | 3bf7e3ef455dd4262e41248f13c04c071039270e | 1e1176c45c4952ba1b9b9e58e9cc4df027ab111d | refs/heads/master | 2021-11-21T03:03:13.656700 | 2021-11-11T21:57:54 | 2021-11-11T21:57:54 | 198,494,579 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,630 | py | """
#Trains a ResNet on the CIFAR10 dataset.
"""
from __future__ import print_function
import keras
from keras.layers import Dense, Conv2D, BatchNormalization, Activation
from keras.layers import AveragePooling2D, Input, Flatten
from keras.optimizers import Adam
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
from keras.callbacks import ReduceLROnPlateau, TensorBoard
from keras.preprocessing.image import ImageDataGenerator
from keras.regularizers import l2
from keras import backend as K
from keras.models import Model
from keras.datasets import cifar10
from keras.applications.vgg16 import VGG16
from keras.applications.vgg19 import VGG19
from keras import models, layers, optimizers
from datetime import datetime
import tensorflow as tf
import numpy as np
import os
import pdb
import sys
import argparse
import time
import signal
import glob
import json
import send_signal
parser = argparse.ArgumentParser(description='Tensorflow Cifar10 Training')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='specific testcase name')
parser.add_argument('--resume', dest='resume', action='store_true', help='if True, resume training from a checkpoint')
parser.add_argument('--gpu_num', metavar='GPU_NUMBER', type=str, help='select which gpu to use')
parser.add_argument('--node', metavar='HOST_NODE', type=str, help='node of the host (scheduler)')
parser.set_defaults(resume=False)
args = parser.parse_args()
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]=args.gpu_num
# Training parameters
batch_size = 32
args_lr = 0.0005
args_model = 'vgg16'
epoch_begin_time = 0
job_name = sys.argv[0].split('.')[0]
save_files = '/scratch/li.baol/checkpoint_predict_error/' + job_name + '*'
total_epochs = 14
starting_epoch = 0
# first step is to update the PID
pid = os.getpid()
message = job_name + ' pid ' + str(pid) # 'job50 pid 3333'
send_signal.send(args.node, 10002, message)
if args.resume:
save_file = glob.glob(save_files)[0]
# epochs = int(save_file.split('/')[4].split('_')[1].split('.')[0])
starting_epoch = int(save_file.split('/')[4].split('.')[0].split('_')[-1])
data_augmentation = True
num_classes = 10
# Subtracting pixel mean improves accuracy
subtract_pixel_mean = True
n = 3
# Model name, depth and version
model_type = args.tc #'P100_resnet50_he_256_1'
# Load the CIFAR10 data.
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Normalize data.
x_train = x_train.astype('float32') / 255
x_test = x_test.astype('float32') / 255
# If subtract pixel mean is enabled
if subtract_pixel_mean:
x_train_mean = np.mean(x_train, axis=0)
x_train -= x_train_mean
x_test -= x_train_mean
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
print('y_train shape:', y_train.shape)
# Convert class vectors to binary class matrices.
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
if args.resume:
print('resume from checkpoint')
message = job_name + ' b_end'
send_signal.send(args.node, 10002, message)
model = keras.models.load_model(save_file)
message = job_name + ' c_end'
send_signal.send(args.node, 10002, message)
else:
print('train from start')
model = models.Sequential()
if '16' in args_model:
base_model = VGG16(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
elif '19' in args_model:
base_model = VGG19(weights=None, include_top=False, input_shape=(32, 32, 3), pooling=None)
#base_model.summary()
#pdb.set_trace()
model.add(base_model)
model.add(layers.Flatten())
model.add(layers.BatchNormalization())
model.add(layers.Dense(128, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))#, kernel_initializer='he_uniform'))
#model.add(layers.Dropout(0.2))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10, activation='softmax'))#, kernel_initializer='he_uniform'))
model.compile(loss='categorical_crossentropy',
optimizer=Adam(lr=args_lr),
metrics=['accuracy'])
#model.summary()
print(model_type)
#pdb.set_trace()
current_epoch = 0
################### connects interrupt signal to the process #####################
def terminateProcess(signalNumber, frame):
# first record the wasted epoch time
global epoch_begin_time
if epoch_begin_time == 0:
epoch_waste_time = 0
else:
epoch_waste_time = int(time.time() - epoch_begin_time)
message = job_name + ' waste ' + str(epoch_waste_time) # 'job50 waste 100'
if epoch_waste_time > 0:
send_signal.send(args.node, 10002, message)
print('checkpointing the model triggered by kill -15 signal')
# delete whatever checkpoint that already exists
for f in glob.glob(save_files):
os.remove(f)
model.save('/scratch/li.baol/checkpoint_predict_error/' + job_name + '_' + str(current_epoch) + '.h5')
print ('(SIGTERM) terminating the process')
message = job_name + ' checkpoint'
send_signal.send(args.node, 10002, message)
sys.exit()
signal.signal(signal.SIGTERM, terminateProcess)
#################################################################################
logdir = '/scratch/li.baol/tsrbrd_log/job_runs/' + model_type + '/' + job_name
tensorboard_callback = TensorBoard(log_dir=logdir)#, update_freq='batch')
first_epoch_start = 0
class PrintEpoch(keras.callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
global current_epoch, first_epoch_start
#remaining_epochs = epochs - epoch
current_epoch = epoch
print('current epoch ' + str(current_epoch))
global epoch_begin_time
epoch_begin_time = time.time()
if epoch == starting_epoch and args.resume:
first_epoch_start = time.time()
message = job_name + ' d_end'
send_signal.send(args.node, 10002, message)
elif epoch == starting_epoch:
first_epoch_start = time.time()
if epoch == starting_epoch:
# send signal to indicate checkpoint is qualified
message = job_name + ' ckpt_qual'
send_signal.send(args.node, 10002, message)
def on_epoch_end(self, epoch, logs=None):
if epoch == starting_epoch:
first_epoch_time = int(time.time() - first_epoch_start)
message = job_name + ' 1st_epoch ' + str(first_epoch_time)
send_signal.send(args.node, 10002, message)
progress = round((epoch+1) / round(total_epochs/2), 2)
message = job_name + ' completion ' + str(progress)
send_signal.send(args.node, 10002, message)
my_callback = PrintEpoch()
callbacks = [tensorboard_callback, my_callback]
#[checkpoint, lr_reducer, lr_scheduler, tensorboard_callback]
# Run training
model.fit(x_train, y_train,
batch_size=batch_size,
epochs=round(total_epochs/2),
validation_data=(x_test, y_test),
shuffle=True,
callbacks=callbacks,
initial_epoch=starting_epoch,
verbose=1
)
# Score trained model.
scores = model.evaluate(x_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# send signal to indicate job has finished
message = job_name + ' finish'
send_signal.send(args.node, 10002, message)
| [
"[email protected]"
] | |
4e3212529919c336eaa65fb17c0588a940065f09 | 8e1d25698853dd5bd1fe89327810afaf22aa7306 | /companies/api/urls.py | d073232701aab54911a84cdb672e6d455d5c9203 | [] | no_license | briancaffey/demo | 3351157c7628b58347dd5d3f749429c5159c8616 | 118b022f9a5558c9dacc0681e8c9593c462d36a4 | refs/heads/master | 2020-06-26T23:14:59.745396 | 2017-07-16T20:23:04 | 2017-07-16T20:23:04 | 97,034,442 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from django.conf.urls import url
from .views import (
CompanyListAPIView,
)
urlpatterns = [
url(r'^api/$', CompanyListAPIView.as_view(), name='company_list'),
]
| [
"[email protected]"
] | |
037b4a377cefa4f9c913c9b919835caeb0f89ad1 | 65f378ce824afb65ff44a9e7634eb5e403a15250 | /discussions/01-python_basics/tests/q32.py | 758113e39d3b23c219799996407eb3ed034b979d | [] | no_license | ucsd-ets/dsc10-2021-sp-public | c067f00a853f12510ac3a897a40296e00b8db247 | d4af09fedd51f988de8136173ba40dc0d0e19c2d | refs/heads/master | 2023-05-25T04:54:36.076251 | 2021-06-06T23:25:31 | 2021-06-06T23:25:31 | 351,519,451 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 235 | py | test = { 'name': 'q32',
'points': 1,
'suites': [{'cases': [{'code': '>>> county_most_fires == "Los Angeles County"\nTrue', 'hidden': False, 'locked': False}], 'scored': True, 'setup': '', 'teardown': '', 'type': 'doctest'}]}
| [
"nixrunner@localhost"
] | nixrunner@localhost |
cc7b73d9f854a55e82733530243f061b1a953b83 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib/wave.py | 28156a0cddf1d07e96068e0ef3c724e3e4e903a9 | [] | no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,852 | py | #\input texinfo
"""Stuff to parse WAVE files.
Usage.
Reading WAVE files:
f = wave.open(file, 'r')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods read(), seek(), and close().
When the setpos() and rewind() methods are not used, the seek()
method is not necessary.
This returns an instance of a class with the following public methods:
getnchannels() -- returns number of audio channels (1 for
mono, 2 for stereo)
getsampwidth() -- returns sample width in bytes
getframerate() -- returns sampling frequency
getnframes() -- returns number of audio frames
getcomptype() -- returns compression type ('NONE' for linear samples)
getcompname() -- returns human-readable version of
                compression type ('not compressed' for linear samples)
getparams() -- returns a tuple consisting of all of the
above in the above order
getmarkers() -- returns None (for compatibility with the
aifc module)
getmark(id) -- raises an error since the mark does not
exist (for compatibility with the aifc module)
readframes(n) -- returns at most n frames of audio
rewind() -- rewind to the beginning of the audio stream
setpos(pos) -- seek to the specified position
tell() -- return the current position
close() -- close the instance (make it unusable)
The position returned by tell() and the position given to setpos()
are compatible and have nothing to do with the actual position in the
file.
The close() method is called automatically when the class instance
is destroyed.
Writing WAVE files:
f = wave.open(file, 'w')
where file is either the name of a file or an open file pointer.
The open file pointer must have methods write(), tell(), seek(), and
close().
This returns an instance of a class with the following public methods:
setnchannels(n) -- set the number of channels
setsampwidth(n) -- set the sample width
setframerate(n) -- set the frame rate
setnframes(n) -- set the number of frames
setcomptype(type, name)
-- set the compression type and the
human-readable compression type
setparams(tuple)
-- set all parameters at once
tell() -- return current position in output file
writeframesraw(data)
                -- write audio frames without patching up the
file header
writeframes(data)
-- write audio frames and patch up the file header
close() -- patch up the file header and close the
output file
You should set the parameters before the first writeframesraw or
writeframes. The total number of frames does not need to be set,
but when it is set to the correct value, the header does not have to
be patched up.
It is best to first set all parameters, except possibly the
compression type, and then write audio frames using writeframesraw.
When all frames have been written, either call writeframes('') or
close() to patch up the sizes in the header.
The close() method is called automatically when the class instance
is destroyed.
"""
import __builtin__
__all__ = ["open", "openfp", "Error"]
class Error(Exception):
pass
WAVE_FORMAT_PCM = 0x0001
_array_fmts = None, 'b', 'h', None, 'l'
# Determine endian-ness
import struct
if struct.pack("h", 1) == "\000\001":
big_endian = 1
else:
big_endian = 0
from chunk import Chunk
class Wave_read:
"""Variables used in this class:
These variables are available to the user though appropriate
methods of this class:
_file -- the open file with methods read(), close(), and seek()
set through the __init__() method
_nchannels -- the number of audio channels
available through the getnchannels() method
_nframes -- the number of audio frames
available through the getnframes() method
_sampwidth -- the number of bytes per audio sample
available through the getsampwidth() method
_framerate -- the sampling frequency
available through the getframerate() method
_comptype -- the AIFF-C compression type ('NONE' if AIFF)
available through the getcomptype() method
_compname -- the human-readable AIFF-C compression type
available through the getcomptype() method
_soundpos -- the position in the audio stream
available through the tell() method, set through the
setpos() method
These variables are used internally only:
_fmt_chunk_read -- 1 iff the FMT chunk has been read
_data_seek_needed -- 1 iff positioned correctly in audio
file for readframes()
_data_chunk -- instantiation of a chunk class for the DATA chunk
_framesize -- size of one frame in the file
"""
def initfp(self, file):
self._convert = None
self._soundpos = 0
self._file = Chunk(file, bigendian = 0)
if self._file.getname() != 'RIFF':
raise Error, 'file does not start with RIFF id'
if self._file.read(4) != 'WAVE':
raise Error, 'not a WAVE file'
self._fmt_chunk_read = 0
self._data_chunk = None
while 1:
self._data_seek_needed = 1
try:
chunk = Chunk(self._file, bigendian = 0)
except EOFError:
break
chunkname = chunk.getname()
if chunkname == 'fmt ':
self._read_fmt_chunk(chunk)
self._fmt_chunk_read = 1
elif chunkname == 'data':
if not self._fmt_chunk_read:
raise Error, 'data chunk before fmt chunk'
self._data_chunk = chunk
self._nframes = chunk.chunksize // self._framesize
self._data_seek_needed = 0
break
chunk.skip()
if not self._fmt_chunk_read or not self._data_chunk:
raise Error, 'fmt chunk and/or data chunk missing'
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, basestring):
f = __builtin__.open(f, 'rb')
self._i_opened_the_file = f
# else, assume it is an open file object already
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def __del__(self):
self.close()
#
# User visible methods.
#
def getfp(self):
return self._file
def rewind(self):
self._data_seek_needed = 1
self._soundpos = 0
def close(self):
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
self._file = None
def tell(self):
return self._soundpos
def getnchannels(self):
return self._nchannels
def getnframes(self):
return self._nframes
def getsampwidth(self):
return self._sampwidth
def getframerate(self):
return self._framerate
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def getparams(self):
return self.getnchannels(), self.getsampwidth(), \
self.getframerate(), self.getnframes(), \
self.getcomptype(), self.getcompname()
def getmarkers(self):
return None
def getmark(self, id):
raise Error, 'no marks'
def setpos(self, pos):
if pos < 0 or pos > self._nframes:
raise Error, 'position not in range'
self._soundpos = pos
self._data_seek_needed = 1
def readframes(self, nframes):
if self._data_seek_needed:
self._data_chunk.seek(0, 0)
pos = self._soundpos * self._framesize
if pos:
self._data_chunk.seek(pos, 0)
self._data_seek_needed = 0
if nframes == 0:
return ''
if self._sampwidth > 1 and big_endian:
# unfortunately the fromfile() method does not take
# something that only looks like a file object, so
# we have to reach into the innards of the chunk object
import array
chunk = self._data_chunk
data = array.array(_array_fmts[self._sampwidth])
nitems = nframes * self._nchannels
if nitems * self._sampwidth > chunk.chunksize - chunk.size_read:
nitems = (chunk.chunksize - chunk.size_read) / self._sampwidth
data.fromfile(chunk.file.file, nitems)
# "tell" data chunk how much was read
chunk.size_read = chunk.size_read + nitems * self._sampwidth
# do the same for the outermost chunk
chunk = chunk.file
chunk.size_read = chunk.size_read + nitems * self._sampwidth
data.byteswap()
data = data.tostring()
else:
data = self._data_chunk.read(nframes * self._framesize)
if self._convert and data:
data = self._convert(data)
self._soundpos = self._soundpos + len(data) // (self._nchannels * self._sampwidth)
return data
#
# Internal methods.
#
def _read_fmt_chunk(self, chunk):
wFormatTag, self._nchannels, self._framerate, dwAvgBytesPerSec, wBlockAlign = struct.unpack('<HHLLH', chunk.read(14))
if wFormatTag == WAVE_FORMAT_PCM:
sampwidth = struct.unpack('<H', chunk.read(2))[0]
self._sampwidth = (sampwidth + 7) // 8
else:
raise Error, 'unknown format: %r' % (wFormatTag,)
self._framesize = self._nchannels * self._sampwidth
self._comptype = 'NONE'
self._compname = 'not compressed'
class Wave_write:
"""Variables used in this class:
These variables are user settable through appropriate methods
of this class:
_file -- the open file with methods write(), close(), tell(), seek()
set through the __init__() method
_comptype -- the AIFF-C compression type ('NONE' in AIFF)
set through the setcomptype() or setparams() method
_compname -- the human-readable AIFF-C compression type
set through the setcomptype() or setparams() method
_nchannels -- the number of audio channels
set through the setnchannels() or setparams() method
_sampwidth -- the number of bytes per audio sample
set through the setsampwidth() or setparams() method
_framerate -- the sampling frequency
set through the setframerate() or setparams() method
_nframes -- the number of audio frames written to the header
set through the setnframes() or setparams() method
These variables are used internally only:
_datalength -- the size of the audio samples written to the header
_nframeswritten -- the number of frames actually written
_datawritten -- the size of the audio samples actually written
"""
def __init__(self, f):
self._i_opened_the_file = None
if isinstance(f, basestring):
f = __builtin__.open(f, 'wb')
self._i_opened_the_file = f
try:
self.initfp(f)
except:
if self._i_opened_the_file:
f.close()
raise
def initfp(self, file):
self._file = file
self._convert = None
self._nchannels = 0
self._sampwidth = 0
self._framerate = 0
self._nframes = 0
self._nframeswritten = 0
self._datawritten = 0
self._datalength = 0
self._headerwritten = False
def __del__(self):
self.close()
#
# User visible methods.
#
def setnchannels(self, nchannels):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if nchannels < 1:
raise Error, 'bad # of channels'
self._nchannels = nchannels
def getnchannels(self):
if not self._nchannels:
raise Error, 'number of channels not set'
return self._nchannels
def setsampwidth(self, sampwidth):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if sampwidth < 1 or sampwidth > 4:
raise Error, 'bad sample width'
self._sampwidth = sampwidth
def getsampwidth(self):
if not self._sampwidth:
raise Error, 'sample width not set'
return self._sampwidth
def setframerate(self, framerate):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if framerate <= 0:
raise Error, 'bad frame rate'
self._framerate = framerate
def getframerate(self):
if not self._framerate:
raise Error, 'frame rate not set'
return self._framerate
def setnframes(self, nframes):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
self._nframes = nframes
def getnframes(self):
return self._nframeswritten
def setcomptype(self, comptype, compname):
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
if comptype not in ('NONE',):
raise Error, 'unsupported compression type'
self._comptype = comptype
self._compname = compname
def getcomptype(self):
return self._comptype
def getcompname(self):
return self._compname
def setparams(self, params):
nchannels, sampwidth, framerate, nframes, comptype, compname = params
if self._datawritten:
raise Error, 'cannot change parameters after starting to write'
self.setnchannels(nchannels)
self.setsampwidth(sampwidth)
self.setframerate(framerate)
self.setnframes(nframes)
self.setcomptype(comptype, compname)
def getparams(self):
if not self._nchannels or not self._sampwidth or not self._framerate:
raise Error, 'not all parameters set'
return self._nchannels, self._sampwidth, self._framerate, \
self._nframes, self._comptype, self._compname
def setmark(self, id, pos, name):
raise Error, 'setmark() not supported'
def getmark(self, id):
raise Error, 'no marks'
def getmarkers(self):
return None
def tell(self):
return self._nframeswritten
def writeframesraw(self, data):
self._ensure_header_written(len(data))
nframes = len(data) // (self._sampwidth * self._nchannels)
if self._convert:
data = self._convert(data)
if self._sampwidth > 1 and big_endian:
import array
data = array.array(_array_fmts[self._sampwidth], data)
data.byteswap()
data.tofile(self._file)
self._datawritten = self._datawritten + len(data) * self._sampwidth
else:
self._file.write(data)
self._datawritten = self._datawritten + len(data)
self._nframeswritten = self._nframeswritten + nframes
def writeframes(self, data):
self.writeframesraw(data)
if self._datalength != self._datawritten:
self._patchheader()
def close(self):
if self._file:
self._ensure_header_written(0)
if self._datalength != self._datawritten:
self._patchheader()
self._file.flush()
self._file = None
if self._i_opened_the_file:
self._i_opened_the_file.close()
self._i_opened_the_file = None
#
# Internal methods.
#
def _ensure_header_written(self, datasize):
if not self._headerwritten:
if not self._nchannels:
raise Error, '# channels not specified'
if not self._sampwidth:
raise Error, 'sample width not specified'
if not self._framerate:
raise Error, 'sampling rate not specified'
self._write_header(datasize)
def _write_header(self, initlength):
assert not self._headerwritten
self._file.write('RIFF')
if not self._nframes:
self._nframes = initlength / (self._nchannels * self._sampwidth)
self._datalength = self._nframes * self._nchannels * self._sampwidth
self._form_length_pos = self._file.tell()
self._file.write(struct.pack('<L4s4sLHHLLHH4s',
36 + self._datalength, 'WAVE', 'fmt ', 16,
WAVE_FORMAT_PCM, self._nchannels, self._framerate,
self._nchannels * self._framerate * self._sampwidth,
self._nchannels * self._sampwidth,
self._sampwidth * 8, 'data'))
self._data_length_pos = self._file.tell()
self._file.write(struct.pack('<L', self._datalength))
self._headerwritten = True
def _patchheader(self):
assert self._headerwritten
if self._datawritten == self._datalength:
return
curpos = self._file.tell()
self._file.seek(self._form_length_pos, 0)
self._file.write(struct.pack('<L', 36 + self._datawritten))
self._file.seek(self._data_length_pos, 0)
self._file.write(struct.pack('<L', self._datawritten))
self._file.seek(curpos, 0)
self._datalength = self._datawritten
def open(f, mode=None):
if mode is None:
if hasattr(f, 'mode'):
mode = f.mode
else:
mode = 'rb'
if mode in ('r', 'rb'):
return Wave_read(f)
elif mode in ('w', 'wb'):
return Wave_write(f)
else:
raise Error, "mode must be 'r', 'rb', 'w', or 'wb'"
openfp = open # B/W compatibility
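# --- Editor's hedged usage sketch (not part of the original module). It only
# --- exercises the public API described in the module docstring above; the
# --- file name 'example.wav' and the one second of silence are illustrative.
if __name__ == '__main__':
    w = open('example.wav', 'wb')
    w.setparams((1, 2, 8000, 0, 'NONE', 'not compressed'))
    w.writeframes('\x00\x00' * 8000)    # 8000 frames of 16-bit mono silence
    w.close()

    r = open('example.wav', 'rb')
    # Expected: (1, 2, 8000, 8000, 'NONE', 'not compressed')
    print(r.getparams())
    print(len(r.readframes(r.getnframes())))    # 16000 bytes
    r.close()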
| [
"[email protected]"
] | |
28d69c649d104842a33a3b0faa6f551bdb3b6b1f | 64c90b077af7feed265041c0e94cbeee4be92f01 | /tools~/gen_docs.py | 96f62efb1a882dda15b4f1aafa1de18cfee027cc | [
"Zlib"
] | permissive | Fopoon/Fopoon.ScenesKit | 5713dd7f420a22079fd9ba57b5984836ec7ca35d | acb45bc02ea6f091f7ec36e232a3bc232f528992 | refs/heads/master | 2022-11-26T19:24:40.789089 | 2020-08-10T04:20:51 | 2020-08-10T04:20:51 | 286,259,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,089 | py | from json import loads
from pathlib import Path
from typing import Dict
from scriptgen import \
StringBuilder, \
diff_text, \
interpolate_text, \
write_text_file
def get_text(
template: str,
expressions: Dict[str, str],
template_name: str = None
) -> str:
sb = StringBuilder()
# Add a timestamp when the template is a markdown file.
if template_name.casefold().endswith(".tmd"):
from scriptgen.templates.markdown import markdown_autogen
sb.wb(markdown_autogen())
sb.nl()
# Replace placeholders.
# i.e. replace placeholders found in the text with values found in the expressions dictionary.
    # e.g. ${SOME_KEY} -> ACTUAL_VALUE
interpolated_text = interpolate_text(template, expressions)
# Write the interpolated text into the builder.
sb.wl(interpolated_text)
return str(sb)
if __name__ == "__main__":
fdp = Path(__file__).parent
templates_dir_name = "templates"
    # tools~/templates/VALUES.t.json
json_path = fdp / templates_dir_name / "VALUES.t.json"
json_text = json_path.read_text()
json = loads(json_text)
templates = {
        # tools~/templates/README.tmd -> README.md
(fdp / templates_dir_name / "README.tmd"): (fdp.parent / "README.md"),
        # tools~/templates/package.tjson -> package.json
(fdp / templates_dir_name / "package.tjson"): (fdp.parent / "package.json")
}
for template_path, target_path in templates.items():
template_text = template_path.read_text()
text = get_text(
template_text,
json,
template_name=template_path.name)
write_text_file(
text,
target_path,
# checks for relevant changes between two texts to determine whether to skip writing into a file.
diff_func=diff_text,
# filters out lines when checking for differences.
filter_func=lambda line, idx: idx < 5 and line.startswith("[//]: # (Auto-generated"),
log_func=lambda message: print(message)
)
| [
"[email protected]"
] | |
1d63518936cb6c7dd07da624adee9a388b513d68 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_086/ch2_2019_08_12_14_38_49_563421.py | b972fa739df8d6f21034c9041e0194bba7ac4e1c | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 84 | py |
def calcula_velocidade_media = (d,t)
return (d/t)
print(calcula_velocidade_media) | [
"[email protected]"
] | |
5124b6b8ae0f31df7809797678631fdaaf6b2c24 | fd326562890d4f1987c384fc7c60374938231222 | /PythonAdvanced/PongGame/game.py | 3e9796356f4d3b20fa9b545010373e402f2496ff | [] | no_license | miro-lp/SoftUni | cc3b0ff742218c9ceaf93f05c319ccfeed5bc8a4 | 283d9328537919de49f7f6a301e58593bae9ca2a | refs/heads/main | 2023-08-23T21:22:07.856226 | 2021-08-25T15:10:18 | 2021-08-25T15:10:18 | 318,134,101 | 2 | 1 | null | 2021-08-10T12:51:54 | 2020-12-03T09:03:08 | Python | UTF-8 | Python | false | false | 2,113 | py | class Game:
def __init__(self):
self.width = 800
self.high = 400
self.__ball_pos = (0, 0)
self.__ball_delta_x = 1
self.__ball_delta_y = 1
self.paddle_a_pos = (-self.width / 2 + 50, 0)
self.paddle_b_pos = (self.width / 2 - 50, 0)
self.paddle_height = self.high / 4
self.paddle_width = 20
self.points_a = 0
self.points_b = 0
def tick(self):
self.__border_check()
self.__paddle_hit()
x, y = self.__ball_pos
self.__ball_pos = (x + self.__ball_delta_x, y + self.__ball_delta_y)
def __border_check(self):
x, y = self.__ball_pos
if abs(y) >= self.high / 2:
self.__ball_delta_y *= -1
if x <= - self.width/2:
self.points_b+=1
self.__ball_pos = (0,0)
if x >= self.width / 2:
self.points_a += 1
self.__ball_pos = (0,0)
def ball_pos(self):
return self.__ball_pos
def paddle_a_up(self):
x, y = self.paddle_a_pos
if y <= self.high / 2 - self.paddle_height / 2:
self.paddle_a_pos = (x, y + 20)
def paddle_a_down(self):
x, y = self.paddle_a_pos
if y >= -self.high / 2 + self.paddle_height / 2:
self.paddle_a_pos = (x, y - 20)
def paddle_b_up(self):
x, y = self.paddle_b_pos
if y <= self.high / 2 - self.paddle_height / 2:
self.paddle_b_pos = (x, y + 20)
def paddle_b_down(self):
x, y = self.paddle_b_pos
if y >= -self.high / 2 + self.paddle_height / 2:
self.paddle_b_pos = (x, y - 20)
def __paddle_hit(self):
x, y = self.__ball_pos
a_x, a_y = self.paddle_a_pos
is_paddle_a_hit = (a_x+self.paddle_width == x and a_y - self.paddle_height / 2 <= y <= a_y + self.paddle_height / 2)
b_x, b_y = self.paddle_b_pos
is_paddle_b_hit = (b_x-self.paddle_width == x and b_y - self.paddle_height / 2 <= y <= b_y + self.paddle_height / 2)
if is_paddle_b_hit or is_paddle_a_hit:
self.__ball_delta_x *= -1
| [
"[email protected]"
] | |
3ebabab1cc0ec29075386911102d4c4d892eed04 | 16ba38ef11b82e93d3b581bbff2c21e099e014c4 | /haohaninfo/ๅณๆ็ฏไพ/61.py | 6e90415a943dd91a9a3bc4319e97582c7b9dabb8 | [] | no_license | penguinwang96825/Auto-Trading | cb7a5addfec71f611bdd82534b90e5219d0602dd | a031a921dbc036681c5054f2c035f94499b95d2e | refs/heads/master | 2022-12-24T21:25:34.835436 | 2020-09-22T09:59:56 | 2020-09-22T09:59:56 | 292,052,986 | 2 | 5 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | # -*- coding: UTF-8 -*-
# Get the five-level bid/ask quote information (see technique 51 for details)
execfile('function.py')
# Get the best-five bid/ask price and volume levels
for i in getUpDn5():
UpDn5Info=i.split(',')
UpDn5Time=UpDn5Info[0]
totalUpPrice=0
totalUpQty=0
totalDnPrice=0
totalDnQty=0
    # Compute the volume-weighted average price of the five bid and five ask levels
for j in range(0,5):
totalDnPrice+=int(UpDn5Info[1+2*j])*int(UpDn5Info[2+2*j])
totalDnQty+=int(UpDn5Info[2+2*j])
totalUpPrice+=int(UpDn5Info[11+2*j])*int(UpDn5Info[12+2*j])
totalUpQty+=int(UpDn5Info[12+2*j])
print UpDn5Time,"avgUpPrice",float(totalUpPrice)/totalUpQty,"avgDnPrice",float(totalDnPrice)/totalDnQty
| [
"[email protected]"
] | |
85cee0f893635d531b95b5d312f8e209a6f535dc | fefb1e9b0b736da4e49d7754f8d1dbaf37f2fa6a | /.history/7_3_2_20210208211227.py | 2021ca036ff147a699a385552cc614517dc2ef19 | [] | no_license | wh-debug/python | 5a78a2227874ebc400d075197de0adab9f55d187 | 1467eeda670f170e6e2d7c0a0550f713f1ee9d75 | refs/heads/master | 2023-03-12T22:08:12.608882 | 2021-02-17T09:49:52 | 2021-02-17T09:49:52 | 334,032,494 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,191 | py | '''
Author: your name
Date: 2021-02-08 21:11:05
LastEditTime: 2021-02-08 21:12:27
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \python\7_3_2.py
'''
'''
_oo0oo_
o8888888o
88" . "88
(| -_- |)
0\ = /0
___/`---'\___
.' \\| |// '.
/ \\||| : |||// \
/ _||||| -:- |||||- \
| | \\\ - /// | |
| \_| ''\---/'' |_/ |
\ .-\__ '-' ___/-. /
___'. .' /--.--\ `. .'___
."" '< `.___\_<|>_/___.' >' "".
| | : `- \`.;`\ _ /`;.`/ - ` : | |
\ \ `_. \_ __\ /__ _/ .-` / /
=====`-.____`.___ \_____/___.-`___.-'=====
`=---='
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
            May the Buddha bless this code: no crashes, no bugs
'''
'''
Author: your name
Date: 2021-02-08 21:11:05
LastEditTime: 2021-02-08 21:11:05
LastEditors: your name
Description: In User Settings Edit
FilePath: \python\7_3_2.py
'''
'''Remove every occurrence of a specific element from a list.'''
"[email protected]"
] | |
9961f02f3cc87bd6bb0fd66daadb71f7fbd6f526 | 3a642fa1fc158d3289358b53770cdb39e5893711 | /src/xlsxwriter/test/worksheet/test_write_sheet_views2.py | 1ac53cd2d1fb5d98adfaf23b01058d89c365b7e0 | [] | no_license | andbar-ru/traceyourself.appspot.com | d461277a3e6f8c27a651a1435f3206d7b9307d9f | 5f0af16ba2727faceb6b7e1b98073cd7d3c60d4c | refs/heads/master | 2020-07-23T14:58:21.511328 | 2016-12-26T22:03:01 | 2016-12-26T22:03:01 | 73,806,841 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,464 | py | ###############################################################################
#
# Tests for XlsxWriter.
#
# Copyright (c), 2013, John McNamara, [email protected]
#
import unittest
from ...compatibility import StringIO
from ...worksheet import Worksheet
class TestWriteSheetViews(unittest.TestCase):
"""
Test the Worksheet _write_sheet_views() method.
"""
def setUp(self):
self.fh = StringIO()
self.worksheet = Worksheet()
self.worksheet._set_filehandle(self.fh)
def test_write_sheet_views1(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(1, 0)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane ySplit="1" topLeftCell="A2" activePane="bottomLeft" state="frozen"/><selection pane="bottomLeft"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_views2(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(0, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" topLeftCell="B1" activePane="topRight" state="frozen"/><selection pane="topRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_views3(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(1, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="1" ySplit="1" topLeftCell="B2" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="B1" sqref="B1"/><selection pane="bottomLeft" activeCell="A2" sqref="A2"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_views4(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes('G4')
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="G4" activePane="bottomRight" state="frozen"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
def test_write_sheet_views5(self):
"""Test the _write_sheet_views() method with freeze panes"""
self.worksheet.select()
self.worksheet.freeze_panes(3, 6, 3, 6, 1)
self.worksheet._write_sheet_views()
exp = '<sheetViews><sheetView tabSelected="1" workbookViewId="0"><pane xSplit="6" ySplit="3" topLeftCell="G4" activePane="bottomRight" state="frozenSplit"/><selection pane="topRight" activeCell="G1" sqref="G1"/><selection pane="bottomLeft" activeCell="A4" sqref="A4"/><selection pane="bottomRight"/></sheetView></sheetViews>'
got = self.fh.getvalue()
self.assertEqual(got, exp)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
8222ad5dbb1fe9a2d9066677863f31844b5d765d | ef78bd58d61002f45778a40da7759ed0b1998cd3 | /code/transforms/subspacegaussianization.py | 121e6249045137472b93b36408bb02b129606325 | [
"MIT"
] | permissive | afcarl/isa | 61e85c0c790c7cc357e0c29fc5bda948e9c77ce4 | f0497c0cc7bd72e0de7f4f9a8da40e214c22abe9 | refs/heads/master | 2020-03-19T21:36:06.716167 | 2013-01-28T18:32:30 | 2013-01-28T18:32:30 | 136,944,562 | 1 | 0 | null | 2018-06-11T15:20:45 | 2018-06-11T15:20:44 | null | UTF-8 | Python | false | false | 3,769 | py | __license__ = 'MIT License <http://www.opensource.org/licenses/mit-license.php>'
__author__ = 'Lucas Theis <[email protected]>'
__docformat__ = 'epytext'
from transform import Transform
from radialgaussianization import RadialGaussianization
from numpy.linalg import inv, slogdet
from numpy import vstack, dot, zeros
from collections import Callable
class SubspaceGaussianization(Transform):
def __init__(self, isa):
"""
        @type isa: L{ISA}
        @param isa: ISA model used for Gaussianization
"""
self.isa = isa
def apply(self, data):
"""
Subspace Gaussianize data by first applying a linear transformation and then
radially Gaussianizing each subspace. If C{isa} is overcomplete, C{data} has
to be completed by the nullspace representation, that is, C{data} should
have the dimensionality of the hidden states.
@type data: array_like
@param data: data points stored in columns
"""
# complete basis
A = vstack([self.isa.A, self.isa.nullspace_basis()])
# linearly transform data
data = dot(inv(A), data)
data_rg = []
if isinstance(self.isa.subspaces, Callable):
subspaces = self.isa.subspaces()
else:
subspaces = self.isa.subspaces
length = len(str(len(subspaces)))
if Transform.VERBOSITY > 0:
print ('{0:>' + str(length) + '}/{1}').format(0, len(subspaces)),
# TODO: parallelize
for i, gsm in enumerate(subspaces):
# radially Gaussianize subspace
data_rg.append(
RadialGaussianization(gsm).apply(data[:gsm.dim]))
data = data[gsm.dim:]
if Transform.VERBOSITY > 0:
print (('\b' * (length * 2 + 2)) + '{0:>' + str(length) + '}/{1}').format(i + 1, len(subspaces)),
if Transform.VERBOSITY > 0:
print
return vstack(data_rg)
def inverse(self, data):
"""
Apply inverse subspace Gaussianization.
"""
data_irg = []
if isinstance(self.isa.subspaces, Callable):
subspaces = self.isa.subspaces()
else:
subspaces = self.isa.subspaces
length = len(str(len(subspaces)))
if Transform.VERBOSITY > 0:
print ('{0:>' + str(length) + '}/{1}').format(0, len(subspaces)),
# TODO: parallelize
for i, gsm in enumerate(subspaces):
# inverse radially Gaussianize subspace
data_irg.append(
RadialGaussianization(gsm).inverse(data[:gsm.dim]))
data = data[gsm.dim:]
if Transform.VERBOSITY > 0:
print (('\b' * (length * 2 + 2)) + '{0:>' + str(length) + '}/{1}').format(i + 1, len(subspaces)),
if Transform.VERBOSITY > 0:
print
data = vstack(data_irg)
# completed filter matrix
# A = vstack([self.isa.A, self.isa.nullspace_basis()])
# linearly transform data
return dot(self.isa.A, data)
def logjacobian(self, data):
"""
        Returns the log-determinant of the Jacobian matrix evaluated at the given
data points.
@type data: array_like
@param data: data points stored in columns
@rtype: ndarray
@return: the logarithm of the Jacobian determinants
"""
# completed filter matrix
A = vstack([self.isa.A, self.isa.nullspace_basis()])
W = inv(A)
# determinant of linear transformation
logjacobian = zeros([1, data.shape[1]]) + slogdet(W)[1]
# linearly transform data
data = dot(W, data)
if isinstance(self.isa.subspaces, Callable):
subspaces = self.isa.subspaces()
else:
subspaces = self.isa.subspaces
length = len(str(len(subspaces)))
if Transform.VERBOSITY > 0:
print ('{0:>' + str(length) + '}/{1}').format(0, len(subspaces)),
# TODO: parallelize
for i, gsm in enumerate(subspaces):
logjacobian += RadialGaussianization(gsm).logjacobian(data[:gsm.dim])
data = data[gsm.dim:]
if Transform.VERBOSITY > 0:
print (('\b' * (length * 2 + 2)) + '{0:>' + str(length) + '}/{1}').format(i + 1, len(subspaces)),
if Transform.VERBOSITY > 0:
print
return logjacobian
| [
"[email protected]"
] | |
413fae5d3fbae535a19798130e74abb371a25cb9 | 020da726bb378ea9fe58af19caadad8e02bd4e27 | /CODES_RCM/Calcul_Heat_Wave_RCM.py | 31eb9bfd038cd5ce54ea25386c8f2ed2b0b10913 | [] | no_license | guimeto/Heat_Wave_Codes | 06dc19f9547b2cc083db9fd7bd44d22fad072a37 | 85a1c6ae582818a5694aef9b17fc7f3578b3af16 | refs/heads/master | 2020-06-22T05:44:11.064070 | 2020-04-10T18:11:19 | 2020-04-10T18:11:19 | 197,648,190 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,289 | py | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 15 10:31:05 2019
@author: guillaume
"""
import xarray as xr
import numpy as np
import pandas as pd
from netCDF4 import Dataset
model='CANRCM4_NAM-44_ll_CanESM2_historical'
yi = 2000
yf = 2000
tot=(yf-yi)+1
#########################################################
rep_min='K:/PROJETS/PROJET_CORDEX/CORDEX-NAM44/DONNEES/CANRCM4_CanESM2_historical/MONTH/tasmin/'
rep_max='K:/PROJETS/PROJET_CORDEX/CORDEX-NAM44/DONNEES/CANRCM4_CanESM2_historical/MONTH/tasmax/'
rep_hum='K:/PROJETS/PROJET_CORDEX/CORDEX-NAM44/DONNEES/CANRCM4_CanESM2_historical/MONTH/humidex/'
def HWDI(tmin, tmax, hum, ind1, ind2, ind3, seq):
actualCount = 0
sequence = 0
i = 0
while (i <= len(tmin)-1):
while (i+1 < len(tmin)) and (tmin[i] >= ind1) and (tmin[i+1] >= ind1) and (tmax[i] >= ind2) and (tmax[i+1] >= ind2) and (hum[i] >= ind3) and (hum[i+1] >= ind3):
i += 1
if actualCount == 0 :
actualCount += 2
else:
actualCount += 1
if actualCount == 0:
i += 1
actualCount = 0
elif (actualCount == 1) or (actualCount == 2) :
actualCount = 0
elif actualCount >= seq:
sequence += 1
actualCount = 0
return(sequence)
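# Editor's hedged sanity check (illustrative, not in the original script): two
# hypothetical spells of >= 3 consecutive qualifying days, separated by one cool
# day, should be counted as 2 heat-wave sequences using the same thresholds
# (Tmin >= 20, Tmax >= 33, humidex >= 40, at least 3 days) as the grid loop below.
_demo_tmin = [21, 22, 23, 10, 21, 22, 23, 24]
_demo_tmax = [34, 35, 36, 20, 34, 35, 36, 37]
_demo_hum = [41, 42, 43, 30, 41, 42, 43, 44]
assert HWDI(_demo_tmin, _demo_tmax, _demo_hum, 20, 33, 40, 3) == 2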
for year in range(yi,yf+1):
data = rep_max + model + '_tasmax_'+str(year) +'*.nc'
tmax = xr.open_mfdataset(data)
data = rep_min + model + '_tasmin_'+str(year) +'*.nc'
tmin = xr.open_mfdataset(data)
data = rep_hum + model + '_humidex_'+str(year) +'*.nc'
hum = xr.open_mfdataset(data)
DS = xr.merge([tmax,tmin, hum])
# get the datetime range
times = pd.date_range("2000-01-01", "2000-12-31", name="time")
times = times[~((times.month == 2) & (times.day == 29))]
DS['time'] = times
DS_date_range = DS.sel(time=slice(str(year) + '-05-01', str(year) + '-09-30'))
DS_date_range.to_netcdf('./tmp.nc')
    # Compute the index
nt=0
IND = np.zeros((tot,130,155),dtype=float)
    ###### open and read the NetCDF files
nc_Modc=Dataset('./tmp.nc','r')
lats=nc_Modc.variables['lat'][:]
lons=nc_Modc.variables['lon'][:]
tmax=nc_Modc.variables['tasmax'][:]
tmin=nc_Modc.variables['tasmin'][:]
humidex=nc_Modc.variables['humidex'][:]
    ###### loop over all grid points and compute the index
for ni in range(0, len(tmax[0])):
for nj in range(0, len(tmax[0][0])):
IND[nt,ni,nj]=HWDI(tmin[:,ni,nj],tmax[:,ni,nj],humidex[:,ni,nj], 20, 33, 40, 3 )
description='Heat Wave Index'
unite='days'
    ###### write the output NetCDF file
C = Dataset('./output/python/'+model+'_historical_HWDI_'+str(yi)+'-'+str(yf)+'_Mai_Septembre.nc', 'w')
C.description = 'Heat Wave Index'
C.conventions = 'CF-1.0'
C.model_id = model
C.institution = 'UQAM - ESCER Center, University of Quebec in Montreal'
C.contact = 'Guillaume Dueymes'
########################################
# Dimensions
C.createDimension('x', len(tmin[0][0]))
C.createDimension('y', len(tmin[0]))
C.createDimension('time', tot)
var=C.createVariable('HWDI', np.float32, ('time','y','x'))
var.long_name = str(description)
var.unit = str(unite)
lat=C.createVariable('lat', np.float32, ('y','x'))
lon=C.createVariable('lon', np.float32, ('y','x'))
time = C.createVariable('time', np.float64, ('time',))
time.long_name = 'time'
nc_Modr=Dataset(rep_min + model + '_tasmin_200001.nc','r')
lats=nc_Modr.variables['lat'][:]
lons=nc_Modr.variables['lon'][:]
for var in ['lon','lat','time']:
for att in nc_Modr.variables[var].ncattrs():
print(att)
setattr(C.variables[var],att,getattr(nc_Modr.variables[var],att))
time[:]=range(1,nt+1)
lat[:,:] = lats
lon[:,:] = lons
C.variables['HWDI'][:,:,:] = IND[::]
C.close()
| [
"[email protected]"
] | |
d3bb2016d415c47c7039c7cc1a1f2fadb8094a6d | 3d4a3bebf614086cce8a22510d8c27c0bea52f92 | /CDS_pro.py | 6963688c12963d1e348fb2a00680deef640be7ea | [] | no_license | caelus95/MantaOcean | dc031518051daac9b718b4c7664a057a956475f8 | dbc5774f6ecd949a8d8f58c66d0101f816b90dc9 | refs/heads/master | 2023-06-18T22:00:26.353952 | 2021-06-29T13:25:48 | 2021-06-29T13:25:48 | 365,965,350 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,252 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 29 18:03:50 2021
@author: caelus
"""
PKG_path = '/home/caelus/dock_1/Working_hub/LGnDC_dep/python_cent/MantaPKG/'
import sys
sys.path.append(PKG_path)
from Manta_Signals.procc_index import sig_pro, linearRegress4Cube
from Manta_Signals.utility import nc2npy
import os
import numpy as np
import pandas as pd
import xarray as xr
from netCDF4 import Dataset
from tqdm import tqdm
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.axes_grid1 import make_axes_locatable
r_path = '/home/caelus/dock_1/Working_hub/DATA_dep/CDS/'
data_name = 'Detrended_CDS_monthly_199301_201912.nc'
w_path = '/home/caelus/dock_1/Working_hub/DATA_dep/Kuroshio/Detrend/data/'
w_name = 'Detrended_CDS_NP_Total.nc'
minlon,maxlon = 112,280
minlat,maxlat = -10,70
# data_a_6M = data_a_6M.mean(dim='latitude')
def MantaCurl2D(u,v,dx=28400.0,dy=28400.0 ):
import numpy as np
'''
dx = 28400.0 # meters calculated from the 0.25 degree spatial gridding
dy = 28400.0 # meters calculated from the 0.25 degree spatial gridding
'''
u_T = u.transpose([1,0])
v_T = v.transpose([1,0])
du_dx, du_dy = np.gradient(u_T, dx,dy)
dv_dx, dv_dy = np.gradient(v_T, dx,dy)
curl = dv_dx - du_dy
return curl.transpose([1,0])
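# Editor's hedged sanity check (illustrative only): for a solid-body rotation
# field u = -y, v = x on a unit-spaced grid, the curl returned by MantaCurl2D
# should be ~2 everywhere. The grid size below is an arbitrary assumption.
_xx, _yy = np.meshgrid(np.arange(6, dtype=float), np.arange(5, dtype=float))
assert np.allclose(MantaCurl2D(-_yy, _xx, dx=1.0, dy=1.0), 2.0)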
ADT_t = xr.open_dataset(r_path+data_name,decode_times=True)
ADT_t = ADT_t.loc[dict(latitude=slice(minlat,maxlat),longitude=slice(minlon,maxlon))]
# Calculating Vorticity (Curl)
tmp_ugos = ADT_t.ugos.values
tmp_vgos = ADT_t.vgos.values
t,at,on = tmp_ugos.shape
Curl = np.zeros_like(tmp_ugos)
for i in range(t):
Curl[i,:,:] = MantaCurl2D(tmp_ugos[i,:,:],tmp_vgos[i,:,:])
CURL = xr.Dataset(
{
'curl': (["time","latitude", "longitude"], Curl)#,
# "mask": (["y","x"],mask)
},
coords={
"longitude": (["longitude"], ADT_t.longitude),
"latitude": (["latitude"], ADT_t.latitude),
"time": (['time'], ADT_t.time),
# "reference_time": pd.Timestamp("2014-09-05"),
},)
# Calculating EKE
ADT_t['EKE'] = (ADT_t.ugos**2 + ADT_t.vgos**2) / 2  # EKE = (u**2 + v**2) / 2
# Merge data
ADT_t = xr.merge([ADT_t,CURL])
ADT_t.to_netcdf(w_path+w_name,'w')
| [
"[email protected]"
] | |
808cf3c32ee1793a9b9e851d99062f6245f9dc9e | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02879/s604770795.py | 8a8a1f8c96a9e183e5e3de56c8219f8a884b4acc | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 274 | py | import math
import collections
import fractions
import itertools
import functools
import operator
def solve():
a, b = map(int, input().split())
if a < 10 and b < 10:
print(a*b)
else:
print(-1)
return 0
if __name__ == "__main__":
solve() | [
"[email protected]"
] | |
31fb114ed920fcfae01485c2a6e8fb4bcf9ab0ff | bbfa3b7ee2008617d33a7c5c7770d22e1aa8836b | /Optimization/dynamic_programming.py | 45645d59bdddc6e1e046f26ab79ad1f62240638a | [
"MIT"
] | permissive | luoshao23/ML_algorithm | 1a0046ce9c3abed029cceffa35defe57fffa82b2 | 6e94fdd0718cd892118fd036c7c5851cf3e6d796 | refs/heads/master | 2021-08-07T08:38:16.102455 | 2020-03-18T06:49:43 | 2020-03-18T06:49:43 | 92,467,636 | 4 | 1 | MIT | 2018-01-16T05:01:29 | 2017-05-26T03:20:08 | Jupyter Notebook | UTF-8 | Python | false | false | 7,776 | py | from random import random, randint, choice
from copy import deepcopy
from math import log
class fwrapper(object):
"""docstring for fwrapper"""
def __init__(self, function, childcount, name):
self.function = function
self.childcount = childcount
self.name = name
class node(object):
"""docstring for node"""
def __init__(self, fw, children):
self.function = fw.function
self.name = fw.name
self.children = children
def evaluate(self, inp):
results = [n.evaluate(inp) for n in self.children]
return self.function(results)
def display(self, indent=0):
print ' ' * indent + self.name
for c in self.children:
c.display(indent + 1)
class paramnode(object):
"""docstring for paramnode"""
def __init__(self, idx):
self.idx = idx
def evaluate(self, inp):
return inp[self.idx]
def display(self, indent=0):
print '%sp%d' % (' ' * indent, self.idx)
class constnode(object):
"""docstring for constnode"""
def __init__(self, v):
self.v = v
def evaluate(self, inp):
return self.v
def display(self, indent=0):
print '%s%d' % (' ' * indent, self.v)
addw = fwrapper(lambda l: l[0] + l[1], 2, 'add')
subw = fwrapper(lambda l: l[0] - l[1], 2, 'subtract')
mulw = fwrapper(lambda l: l[0] * l[1], 2, 'multiply')
def iffunc(l):
if l[0] > 0:
return l[1]
else:
return l[2]
ifw = fwrapper(iffunc, 3, 'if')
def isgreater(l):
if l[0] > l[1]:
return 1
else:
return 0
gtw = fwrapper(isgreater, 2, 'isgreater')
flist = [addw, mulw, ifw, gtw, subw]
def exampletree():
return node(ifw, [
node(gtw, [paramnode(0), constnode(3)]),
node(addw, [paramnode(1), constnode(5)]),
node(subw, [paramnode(1), constnode(2)]),
]
)
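# Editor's note (hedged): exampletree() encodes, in effect,
#     if p0 > 3: return p1 + 5
#     else:      return p1 - 2
# so the two quick checks below should hold.
assert exampletree().evaluate([2, 3]) == 1  # 2 > 3 is false -> 3 - 2
assert exampletree().evaluate([5, 3]) == 8  # 5 > 3 is true  -> 3 + 5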
def makerandomtree(pc, maxdepth=4, fpr=0.5, ppr=0.6):
if random() < fpr and maxdepth > 0:
f = choice(flist)
children = [makerandomtree(pc, maxdepth - 1, fpr, ppr)
for i in xrange(f.childcount)]
return node(f, children)
elif random() < ppr:
return paramnode(randint(0, pc - 1))
else:
return constnode(randint(0, 10))
def hiddenfunction(x, y):
return x**2 + 2 * y + 3 * x + 5
def buildhiddenset():
rows = []
for i in xrange(200):
x = randint(0, 40)
y = randint(0, 40)
rows.append([x, y, hiddenfunction(x, y)])
return rows
def scorefunction(tree, s):
dif = 0
for data in s:
v = tree.evaluate([data[0], data[1]])
dif += abs(v - data[2])
return dif
def mutate(t, pc, probchange=0.2):
if random() < probchange:
return makerandomtree(pc)
else:
result = deepcopy(t)
if isinstance(t, node):
result.children = [mutate(c, pc, probchange) for c in t.children]
return result
def crossover(t1, t2, probswap=0.7, top=1):
if random() < probswap and not top:
return deepcopy(t2)
else:
result = deepcopy(t1)
if hasattr(t1, 'children') and hasattr(t2, 'children'):
result.children = [crossover(c, choice(t2.children), probswap, 0)
for c in t1.children]
return result
def getrankfunction(dataset):
def rankfunction(population):
scores = [(scorefunction(t, dataset), t) for t in population]
scores.sort()
return scores
return rankfunction
def evolve(pc, popsize, rankfunction, maxgen=500,
mutationrate=0.1, breedingrate=0.4, pexp=0.7, pnew=0.05):
# Returns a random number, tending towards lower numbers. The lower pexp
# is, more lower numbers you will get
def selectindex():
return int(log(random()) / log(pexp))
# Create a random initial population
population = [makerandomtree(pc) for i in range(popsize)]
for i in range(maxgen):
scores = rankfunction(population)
print scores[0][0]
if scores[0][0] == 0:
break
# The two best always make it
newpop = [scores[0][1], scores[1][1]]
# Build the next generation
while len(newpop) < popsize:
if random() > pnew:
newpop.append(mutate(
crossover(scores[selectindex()][1],
scores[selectindex()][1],
probswap=breedingrate),
pc, probchange=mutationrate))
else:
# Add a random node to mix things up
newpop.append(makerandomtree(pc))
population = newpop
scores[0][1].display()
return scores[0][1]
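# Editor's hedged usage sketch (left commented out because it is slow and prints
# one score per generation): evolve a 2-parameter tree against the hidden dataset.
# rf = getrankfunction(buildhiddenset())
# best = evolve(2, 500, rf, mutationrate=0.2, breedingrate=0.1, pexp=0.7, pnew=0.1)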
def gridgame(p):
# Board size
max = (3, 3)
# Remember the last move for each player
lastmove = [-1, -1]
# Remember the player's locations
location = [[randint(0, max[0]), randint(0, max[1])]]
# Put the second player a sufficient distance from the first
location.append([(location[0][0] + 2) % 4, (location[0][1] + 2) % 4])
# Maximum of 50 moves before a tie
for o in range(50):
# For each player
for i in range(2):
locs = location[i][:] + location[1 - i][:]
locs.append(lastmove[i])
move = p[i].evaluate(locs) % 4
# You lose if you move the same direction twice in a row
if lastmove[i] == move:
return 1 - i
lastmove[i] = move
if move == 0:
location[i][0] -= 1
# Board wraps
if location[i][0] < 0:
location[i][0] = 0
if move == 1:
location[i][0] += 1
if location[i][0] > max[0]:
location[i][0] = max[0]
if move == 2:
location[i][1] -= 1
if location[i][1] < 0:
location[i][1] = 0
if move == 3:
location[i][1] += 1
if location[i][1] > max[1]:
location[i][1] = max[1]
# If you have captured the other player, you win
if location[i] == location[1 - i]:
return i
def tournament(pl):
# Count losses
losses = [0 for p in pl]
# Every player plays every other player
for i in range(len(pl)):
for j in range(len(pl)):
if i == j:
continue
# Who is the winner?
winner = gridgame([pl[i], pl[j]])
# Two points for a loss, one point for a tie
if winner == 0:
losses[j] += 2
elif winner == 1:
losses[i] += 2
elif winner == -1:
losses[i] += 1
losses[i] += 1
pass
# Sort and return the results
z = zip(losses, pl)
z.sort()
return z
class humanplayer:
def evaluate(self, board):
# Get my location and the location of other players
me = tuple(board[0:2])
others = [tuple(board[x:x + 2]) for x in range(2, len(board) - 1, 2)]
# Display the board
for i in range(4):
for j in range(4):
if (i, j) == me:
print 'O',
elif (i, j) in others:
print 'X',
else:
print '.',
print
# Show moves, for reference
print 'Your last move was %d' % board[len(board) - 1]
print ' 0'
print '2 3'
print ' 1'
print 'Enter move: ',
# Return whatever the user enters
move = int(raw_input())
return move
| [
"[email protected]"
] | |
0d7184a80475f7a0641e24853ef565479c0e124d | c6d4fa98b739a64bb55a8750b4aecd0fc0b105fd | /ScanPi/QRbytes2/64.py | 3f5c349b6d306976ab708a09143df5219a533f84 | [] | no_license | NUSTEM-UK/Heart-of-Maker-Faire | de2c2f223c76f54a8b4c460530e56a5c74b65ca3 | fa5a1661c63dac3ae982ed080d80d8da0480ed4e | refs/heads/master | 2021-06-18T13:14:38.204811 | 2017-07-18T13:47:49 | 2017-07-18T13:47:49 | 73,701,984 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,746 | py | data = [
0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0xC0, 0xFC, 0x0F,
0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0xC0,
0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00,
0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0x00, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF,
0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0xC0, 0xFC, 0x0F,
0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x3F,
0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03,
0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00,
0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x3F,
0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00,
0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0x00, 0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x3F, 0xFC, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x3F,
0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00,
0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0x00, 0x00, 0x3F, 0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x3F, 0x00, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x3F, 0x00, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x3F,
0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00,
0x00, 0x3F, 0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0x00, 0x00, 0x3F, 0x00, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F,
0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC,
0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x3F,
0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03,
0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00,
0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0,
0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC,
0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0xFF,
0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F,
0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x0F, 0xC0,
0xFC, 0x0F, 0xFF, 0xFF, 0xFF, 0xFF, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xC0, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xC0,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0x00, 0x0F,
0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF,
0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xFF, 0xC0, 0xFC,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x0F, 0xFF, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xFF,
0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xFF, 0xFF, 0xFF, 0xC0,
0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xFF, 0xFF,
0xFF, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0x00, 0x0F,
0xFF, 0xFF, 0xFF, 0xC0, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x03, 0xF0, 0x3F, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x3F, 0x00,
0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x03, 0xF0, 0x3F, 0xFF, 0xFF, 0xC0, 0x03, 0xF0,
0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x03, 0xF0, 0x3F, 0xFF, 0xFF, 0xC0,
0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x03, 0xF0, 0x3F, 0xFF,
0xFF, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x03, 0xF0,
0x3F, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x3F, 0x00, 0x0F, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x03, 0xF0, 0x3F, 0xFF, 0xFF, 0xC0, 0x03, 0xF0, 0x3F, 0x00,
0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xF0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00,
0x3F, 0xFF, 0xF0, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 0xF0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFC, 0x0F, 0xC0,
0xFC, 0x00, 0x3F, 0xFF, 0xF0, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0xF0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFC,
0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0xFF, 0xF0, 0x00, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x3F, 0xFF, 0xFF,
0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0xFF, 0xF0, 0x00,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x3F,
0xFF, 0xFF, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00, 0x3F, 0xFF,
0xF0, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xF0, 0x3F, 0xFF, 0xFF, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x00,
0x3F, 0xFF, 0xF0, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0x03, 0xFF, 0xC0, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0x03, 0xFF, 0xC0, 0x00, 0x00, 0x3F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0x03, 0xFF, 0xC0, 0x00, 0x00, 0x3F,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3F,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0x03, 0xFF, 0xC0, 0x00,
0x00, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x3F, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0x03, 0xFF,
0xC0, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0x03, 0xFF, 0xC0, 0x00, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0xFF, 0xFF, 0xFF, 0x03,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0xFF, 0xFF,
0xFF, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0,
0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC,
0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xC0, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xC0, 0xFF, 0xFF, 0xFF, 0x03, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0xFF, 0xFF, 0xFF, 0x03,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0x00,
0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xFF, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xFF, 0x00, 0x0F, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x3F,
0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0xFC,
0x00, 0x3F, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x3F, 0xFC, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xF0, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03,
0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x03, 0xF0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xFF,
0xFC, 0x00, 0x00, 0x03, 0xF0, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00,
0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x03, 0xF0, 0x00, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0x00, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x03, 0xF0, 0x00,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xFF, 0xFC, 0x00, 0x00, 0x03,
0xF0, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xFF, 0xFC, 0x00,
0x00, 0x03, 0xF0, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0x00, 0x0F, 0xFF,
0xFC, 0x00, 0x00, 0x03, 0xF0, 0x00, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x00, 0x3F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x00, 0x3F,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC,
0x00, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0, 0xFC, 0x00,
0x00, 0xFC, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF, 0xFF, 0xC0,
0xFC, 0x00, 0x00, 0xFC, 0x00, 0x3F, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFF,
0xFF, 0xC0, 0xFC, 0x00, 0x00, 0xFC, 0x00, 0x3F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF,
0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xC0, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC, 0x0F, 0xC0,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F, 0xC0, 0xFC,
0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x0F, 0xFF, 0xFC, 0x0F,
0xC0, 0xFC, 0x0F, 0xC0, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xC0,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xFF, 0x03, 0xF0, 0x00, 0xFF,
0xF0, 0x3F, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC,
0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xFF, 0x03, 0xF0,
0x00, 0xFF, 0xF0, 0x3F, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xFF,
0x03, 0xF0, 0x00, 0xFF, 0xF0, 0x3F, 0xFC, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F, 0xC0, 0x03,
0xFF, 0xFF, 0x03, 0xF0, 0x00, 0xFF, 0xF0, 0x3F, 0xFC, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00, 0x00, 0x0F,
0xC0, 0x03, 0xFF, 0xFF, 0x03, 0xF0, 0x00, 0xFF, 0xF0, 0x3F,
0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFC, 0x00, 0x00,
0x00, 0x0F, 0xC0, 0x03, 0xFF, 0xFF, 0x03, 0xF0, 0x00, 0xFF,
0xF0, 0x3F, 0xFC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0xC0, 0xFC, 0x0F,
0xFF, 0x03, 0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0xC0,
0xFC, 0x0F, 0xFF, 0x03, 0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00,
0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xF0, 0x3F, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xC0, 0x00, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0x03, 0xF0, 0x3F,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0xC0, 0xFC, 0x0F, 0xFF, 0x03,
0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xC0, 0x00, 0x0F, 0xC0, 0xFC, 0x0F,
0xFF, 0x03, 0xF0, 0x3F, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00
]
| [
"[email protected]"
] | |
7b490f80ae0998de959f9d75b7c9872a0e7c11ea | 72a3f41a94202d6d378c222c5cfa9e68155109bb | /uralbrick/tags/0.1/catalogapp/api/__init__.py | 65b560c47a39864c67faf72064810cdf4cf0f98f | [] | no_license | vakhov/python-django-projects | c312b8bcd94aa448a2678c156ff4936e4a68f668 | 6f296aa75d7692eb5dcb68ef4ce20cadee9dc9e6 | refs/heads/master | 2021-01-17T12:12:56.730072 | 2012-07-25T16:40:45 | 2012-07-25T16:40:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 60 | py | import brands
import goods
import sections
import specfields | [
"succubi@succubi-Inspiron-1501.(none)"
] | succubi@succubi-Inspiron-1501.(none) |
6f4d7ba3cce0793b202eb91ca89412a78b296c3e | 787778879d68946cb053744278771b14541bd198 | /tensorflow/contrib/skflow/python/skflow/estimators/base.py | 30332a00c9a104821466b1a4ec7adbf92559f538 | [
"Apache-2.0"
] | permissive | yluo42/tensorflow | 82a8ea0267d040377c88bf90459a89bc9d18f41d | 30f3cdfc420d831e2591cce62fa51164cf8a700a | refs/heads/master | 2021-09-24T20:29:32.182046 | 2016-04-05T18:44:52 | 2016-04-05T18:44:52 | 55,558,456 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,945 | py | """Base estimator class."""
# Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import os
import shutil
from six import string_types
import numpy as np
import tensorflow as tf
from google.protobuf import text_format
from sklearn.base import BaseEstimator
try:
from sklearn.exceptions import NotFittedError
except ImportError:
from sklearn.utils.validation import NotFittedError # pylint: disable=ungrouped-imports
from ..trainer import TensorFlowTrainer, RestoredTrainer
from ..io.data_feeder import setup_train_data_feeder
from ..io.data_feeder import setup_predict_data_feeder
from ..ops.dropout_ops import DROPOUTS
from .. import monitors
from ..addons.config_addon import ConfigAddon
def _write_with_backup(filename, content):
if os.path.exists(filename):
shutil.move(filename, filename + '.old')
with open(filename, 'w') as f:
f.write(content)
class TensorFlowEstimator(BaseEstimator):
"""Base class for all TensorFlow estimators.
Parameters:
model_fn: Model function, that takes input X, y tensors and outputs
prediction and loss tensors.
n_classes: Number of classes in the target.
tf_master: TensorFlow master. Empty string is default for local.
batch_size: Mini batch size.
steps: Number of steps to run over data.
optimizer: Optimizer name (or class), for example "SGD", "Adam",
"Adagrad".
learning_rate: If this is a constant float value, no decay function is used.
Instead, a customized decay function can be passed that accepts
global_step as parameter and returns a Tensor.
e.g. exponential decay function:
def exp_decay(global_step):
return tf.train.exponential_decay(
learning_rate=0.1, global_step,
decay_steps=2, decay_rate=0.001)
class_weight: None or list of n_classes floats. Weight associated with
classes for loss computation. If not given, all classes are supposed to have
weight one.
tf_random_seed: Random seed for TensorFlow initializers.
Setting this value, allows consistency between reruns.
continue_training: when continue_training is True, the initialized
model will be continually trained on every call of fit.
config_addon: ConfigAddon object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc.
verbose: Controls the verbosity, possible values:
0: the algorithm and debug information is muted.
1: trainer prints the progress.
2: log device placement is printed.
max_to_keep: The maximum number of recent checkpoint files to keep.
As new files are created, older files are deleted.
If None or 0, all checkpoint files are kept.
Defaults to 5 (that is, the 5 most recent checkpoint files are kept.)
keep_checkpoint_every_n_hours: Number of hours between each checkpoint
to be saved. The default value of 10,000 hours effectively disables the feature.
"""
def __init__(self, model_fn, n_classes, tf_master="", batch_size=32,
steps=200, optimizer="Adagrad",
learning_rate=0.1, class_weight=None,
tf_random_seed=42, continue_training=False,
config_addon=None, verbose=1,
max_to_keep=5, keep_checkpoint_every_n_hours=10000):
self.n_classes = n_classes
self.tf_master = tf_master
self.batch_size = batch_size
self.steps = steps
self.verbose = verbose
self.optimizer = optimizer
self.learning_rate = learning_rate
self.tf_random_seed = tf_random_seed
self.model_fn = model_fn
self.continue_training = continue_training
self._initialized = False
self.max_to_keep = max_to_keep
self.keep_checkpoint_every_n_hours = keep_checkpoint_every_n_hours
self.class_weight = class_weight
self.config_addon = config_addon
def _setup_training(self):
"""Sets up graph, model and trainer."""
self._graph = tf.Graph()
self._graph.add_to_collection("IS_TRAINING", True)
with self._graph.as_default():
tf.set_random_seed(self.tf_random_seed)
self._global_step = tf.Variable(
0, name="global_step", trainable=False)
# Setting up input and output placeholders.
input_shape = [None] + self._data_feeder.input_shape[1:]
output_shape = [None] + self._data_feeder.output_shape[1:]
self._inp = tf.placeholder(
tf.as_dtype(self._data_feeder.input_dtype), input_shape,
name="input")
self._out = tf.placeholder(
tf.as_dtype(self._data_feeder.output_dtype), output_shape,
name="output")
# If class weights are provided, add them to the graph.
# Different loss functions can use this tensor by name.
if self.class_weight:
self._class_weight_node = tf.constant(
self.class_weight, name='class_weight')
# Add histograms for X and y if they are floats.
if self._data_feeder.input_dtype in (np.float32, np.float64):
tf.histogram_summary("X", self._inp)
if self._data_feeder.output_dtype in (np.float32, np.float64):
tf.histogram_summary("y", self._out)
# Create model's graph.
self._model_predictions, self._model_loss = self.model_fn(
self._inp, self._out)
# Create summary to monitor loss
tf.scalar_summary("loss", self._model_loss)
# Set up a single operator to merge all the summaries
self._summaries = tf.merge_all_summaries()
# Create trainer and augment graph with gradients and optimizer.
# Additionally creates initialization ops.
self._trainer = TensorFlowTrainer(
loss=self._model_loss, global_step=self._global_step,
optimizer=self.optimizer, learning_rate=self.learning_rate)
# Create model's saver capturing all the nodes created up until now.
self._saver = tf.train.Saver(
max_to_keep=self.max_to_keep,
keep_checkpoint_every_n_hours=self.keep_checkpoint_every_n_hours)
# Enable monitor to create validation data dict with appropriate tf placeholders
self._monitor.create_val_feed_dict(self._inp, self._out)
# Create session to run model with.
if self.config_addon is None:
self.config_addon = ConfigAddon(verbose=self.verbose)
self._session = tf.Session(self.tf_master, config=self.config_addon.config)
def _setup_summary_writer(self, logdir):
"""Sets up the summary writer to prepare for later optional visualization."""
self._summary_writer = tf.train.SummaryWriter(
os.path.join(logdir, datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')),
graph_def=self._session.graph_def)
def fit(self, X, y, monitor=None, logdir=None):
"""Builds a neural network model given provided `model_fn` and training
data X and y.
Note: the first call constructs the graph and initializes the
variables. Consecutive calls will continue training the same model.
This logic follows the partial_fit() interface in scikit-learn.
To restart learning, create a new estimator.
Args:
X: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class labels in classification, real numbers in regression).
monitor: Monitor object to print training progress and invoke early stopping
logdir: the directory to save the log file that can be used for
optional visualization.
Returns:
Returns self.
"""
# Sets up data feeder.
self._data_feeder = setup_train_data_feeder(X, y,
self.n_classes,
self.batch_size)
if monitor is None:
self._monitor = monitors.default_monitor(verbose=self.verbose)
else:
self._monitor = monitor
if not self.continue_training or not self._initialized:
# Sets up model and trainer.
self._setup_training()
# Initialize model parameters.
self._trainer.initialize(self._session)
self._initialized = True
# Sets up summary writer for later optional visualization.
# Because _summary_writer cannot be set up in __init__ (it is not a
# parameter of the model), we need to check here whether such a variable
# exists and whether it is None (in case it was set up in a previous run).
# It is initialized only when it wasn't set up before and a log dir is
# provided.
if logdir:
if (not hasattr(self, "_summary_writer") or
(hasattr(self, "_summary_writer") and self._summary_writer is None)):
self._setup_summary_writer(logdir)
else:
self._summary_writer = None
# Train model for given number of steps.
self._trainer.train(self._session,
self._data_feeder.get_feed_dict_fn(
self._inp, self._out),
self.steps,
self._monitor,
self._summary_writer,
self._summaries,
feed_params_fn=self._data_feeder.get_feed_params)
return self
def partial_fit(self, X, y):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different or the same chunks of the dataset. This either can
implement iterative training or out-of-core/online training.
This is especially useful when the whole dataset is too big to
fit in memory at the same time, or when the model is taking a long time
to converge and you want to split up training into subparts.
Args:
X: matrix or tensor of shape [n_samples, n_features...]. Can be
iterator that returns arrays of features. The training input
samples for fitting the model.
y: vector or matrix [n_samples] or [n_samples, n_outputs]. Can be
iterator that returns array of targets. The training target values
(class label in classification, real numbers in regression).
Returns:
Returns self.
"""
return self.fit(X, y)
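# Illustrative out-of-core training loop; `iter_chunks` is a placeholder for
# any iterable yielding (x_chunk, y_chunk) pairs:
#
#     for x_chunk, y_chunk in iter_chunks():
#         estimator.partial_fit(x_chunk, y_chunk)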
def _predict(self, X, axis=-1, batch_size=None):
if not self._initialized:
raise NotFittedError()
# Use the batch size for fitting if the user did not specify one.
if batch_size is None:
batch_size = self.batch_size
self._graph.add_to_collection("IS_TRAINING", False)
predict_data_feeder = setup_predict_data_feeder(
X, batch_size=batch_size)
preds = []
dropouts = self._graph.get_collection(DROPOUTS)
feed_dict = {prob: 1.0 for prob in dropouts}
for data in predict_data_feeder:
feed_dict[self._inp] = data
predictions_for_batch = self._session.run(
self._model_predictions,
feed_dict)
if self.n_classes > 1 and axis != -1:
preds.append(predictions_for_batch.argmax(axis=axis))
else:
preds.append(predictions_for_batch)
return np.concatenate(preds, axis=0)
def predict(self, X, axis=1, batch_size=None):
"""Predict class or regression for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Args:
X: array-like matrix, [n_samples, n_features...] or iterator.
axis: Which axis to argmax for classification.
By default axis 1 (next after batch) is used.
Use 2 for sequence predictions.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size member
variable is used.
Returns:
y: array of shape [n_samples]. The predicted classes or predicted
value.
"""
return self._predict(X, axis=axis, batch_size=batch_size)
def predict_proba(self, X, batch_size=None):
"""Predict class probability of the input samples X.
Args:
X: array-like matrix, [n_samples, n_features...] or iterator.
batch_size: If test set is too big, use batch size to split
it into mini batches. By default the batch_size
member variable is used.
Returns:
y: array of shape [n_samples, n_classes]. The predicted
probabilities for each class.
"""
return self._predict(X, batch_size=batch_size)
def get_tensor(self, name):
"""Returns tensor by name.
Args:
name: string, name of the tensor.
Returns:
Tensor.
"""
return self._graph.get_tensor_by_name(name)
def get_tensor_value(self, name):
"""Returns value of the tensor give by name.
Args:
name: string, name of the tensor.
Returns:
Numpy array - value of the tensor.
"""
return self._session.run(self.get_tensor(name))
def save(self, path):
"""Saves checkpoints and graph to given path.
Args:
path: Folder to save model to.
"""
if not self._initialized:
raise NotFittedError()
# Currently Saver requires absolute path to work correctly.
path = os.path.abspath(path)
if not os.path.exists(path):
os.makedirs(path)
if not os.path.isdir(path):
raise ValueError("Path %s should be a directory to save"
"checkpoints and graph." % path)
# Save model definition.
all_params = self.get_params()
params = {}
for key, value in all_params.items():
if not callable(value) and value is not None:
params[key] = value
params['class_name'] = type(self).__name__
model_def = json.dumps(
params,
default=lambda o: o.__dict__ if hasattr(o, '__dict__') else None)
_write_with_backup(os.path.join(path, 'model.def'), model_def)
# Save endpoints.
endpoints = '%s\n%s\n%s\n%s' % (
self._inp.name,
self._out.name,
self._model_predictions.name,
self._model_loss.name)
_write_with_backup(os.path.join(path, 'endpoints'), endpoints)
# Save graph definition.
_write_with_backup(os.path.join(path, 'graph.pbtxt'), str(self._graph.as_graph_def()))
# Save saver definition.
_write_with_backup(os.path.join(path, 'saver.pbtxt'), str(self._saver.as_saver_def()))
# Save checkpoints.
self._saver.save(self._session, os.path.join(path, 'model'),
global_step=self._global_step)
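# Illustrative save/restore round trip; the path is a placeholder:
#
#     est.save("/tmp/my_estimator")
#     est2 = TensorFlowEstimator.restore("/tmp/my_estimator")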
def _restore(self, path):
"""Restores this estimator from given path.
Note: will rebuild the graph and initialize all parameters,
and will ignore provided model.
Args:
path: Path to checkpoints and other information.
"""
# Currently Saver requires absolute path to work correctly.
path = os.path.abspath(path)
self._graph = tf.Graph()
with self._graph.as_default():
endpoints_filename = os.path.join(path, 'endpoints')
if not os.path.exists(endpoints_filename):
raise ValueError("Restore folder doesn't contain endpoints.")
with open(endpoints_filename) as foutputs:
endpoints = foutputs.read().split('\n')
graph_filename = os.path.join(path, 'graph.pbtxt')
if not os.path.exists(graph_filename):
raise ValueError("Restore folder doesn't contain graph definition.")
with open(graph_filename) as fgraph:
graph_def = tf.GraphDef()
text_format.Merge(fgraph.read(), graph_def)
(self._inp, self._out,
self._model_predictions, self._model_loss) = tf.import_graph_def(
graph_def, name='', return_elements=endpoints)
saver_filename = os.path.join(path, 'saver.pbtxt')
if not os.path.exists(saver_filename):
raise ValueError("Restore folder doesn't contain saver defintion.")
with open(saver_filename) as fsaver:
saver_def = tf.train.SaverDef()
text_format.Merge(fsaver.read(), saver_def)
self._saver = tf.train.Saver(saver_def=saver_def)
# Restore trainer
self._global_step = self._graph.get_tensor_by_name('global_step:0')
trainer_op = self._graph.get_operation_by_name('train')
self._trainer = RestoredTrainer(
self._model_loss, self._global_step, trainer_op)
# Restore summaries.
self._summaries = self._graph.get_operation_by_name('MergeSummary/MergeSummary')
# Restore session.
if not isinstance(self.config_addon, ConfigAddon):
self.config_addon = ConfigAddon(verbose=self.verbose)
self._session = tf.Session(
self.tf_master,
config=self.config_addon.config)
checkpoint_path = tf.train.latest_checkpoint(path)
if checkpoint_path is None:
raise ValueError("Missing checkpoint files in the %s. Please "
"make sure you are you have checkpoint file that describes "
"latest checkpoints and appropriate checkpoints are there. "
"If you have moved the folder, you at this point need to "
"update manually update the paths in the checkpoint file." % path)
self._saver.restore(self._session, checkpoint_path)
# Set to be initialized.
self._initialized = True
# pylint: disable=unused-argument
@classmethod
def restore(cls, path, config_addon=None):
"""Restores model from give path.
Args:
path: Path to the checkpoints and other model information.
config_addon: ConfigAddon object that controls the configurations of the session,
e.g. num_cores, gpu_memory_fraction, etc. This is allowed to be reconfigured.
Returns:
Estimator, an object of a subclass of TensorFlowEstimator.
"""
model_def_filename = os.path.join(path, 'model.def')
if not os.path.exists(model_def_filename):
raise ValueError("Restore folder doesn't contain model definition.")
# list of parameters that are allowed to be reconfigured
reconfigurable_params = ['config_addon']
with open(model_def_filename) as fmodel:
model_def = json.loads(fmodel.read())
# TensorFlow binding requires parameters to be strings not unicode.
# Only issue in Python2.
for key, value in model_def.items():
if (isinstance(value, string_types) and
not isinstance(value, str)):
model_def[key] = str(value)
if key in reconfigurable_params:
newValue = locals()[key]
if newValue is not None:
model_def[key] = newValue
class_name = model_def.pop('class_name')
if class_name == 'TensorFlowEstimator':
custom_estimator = TensorFlowEstimator(model_fn=None, **model_def)
custom_estimator._restore(path)
return custom_estimator
# To avoid cyclical dependencies, import inside the function instead of
# the beginning of the file.
from tensorflow.contrib.skflow.python.skflow import estimators
# Estimator must be one of the defined estimators in the __init__ file.
estimator = getattr(estimators, class_name)(**model_def)
estimator._restore(path)
return estimator
| [
"[email protected]"
] | |
9f89cd804a35d8fb1df192bfec08abe49386513a | 3784495ba55d26e22302a803861c4ba197fd82c7 | /src/torch/legacy/nn/SpatialConvolutionMap.py | e901140a528bb3805879a4884c68b43e1afbd26d | [
"MIT"
] | permissive | databill86/HyperFoods | cf7c31f5a6eb5c0d0ddb250fd045ca68eb5e0789 | 9267937c8c70fd84017c0f153c241d2686a356dd | refs/heads/master | 2021-01-06T17:08:48.736498 | 2020-02-11T05:02:18 | 2020-02-11T05:02:18 | 241,407,659 | 3 | 0 | MIT | 2020-02-18T16:15:48 | 2020-02-18T16:15:47 | null | UTF-8 | Python | false | false | 4,384 | py | import random
import math
import torch
from .Module import Module
# TODO fix THNN...
class SpatialConvolutionMap(Module):
class maps(object):
@staticmethod
def full(nin, nout):
ft = torch.Tensor(nin * nout, 2)
p = 0
for j in range(nout):
for i in range(nin):
ft[p][0] = i
ft[p][1] = j
p += 1
return ft
@staticmethod
def oneToOne(nfeat):
ft = torch.Tensor(nfeat, 2)
for i in range(nfeat):
ft[i][0] = i
ft[i][1] = i
return ft
@staticmethod
def random(nin, nout, nto):
nker = nto * nout
tbl = torch.Tensor(nker, 2)
fi = torch.randperm(nin)
frcntr = 0
nfi = math.floor(nin / nto) # number of distinct nto chunks
totbl = tbl.select(1, 1)
frtbl = tbl.select(1, 0)
fitbl = fi.narrow(0, 0, (nfi * nto)) # part of fi that covers distinct chunks
ufrtbl = frtbl.unfold(0, nto, nto)
utotbl = totbl.unfold(0, nto, nto)
ufitbl = fitbl.unfold(0, nto, nto)
# start filling frtbl
for i in range(nout): # for each unit in the target map
ufrtbl.select(0, i).copy_(ufitbl.select(0, frcntr))
frcntr += 1
if frcntr - 1 == nfi: # reset fi
fi.copy_(torch.randperm(nin))
frcntr = 1
for tocntr in range(utotbl.size(0)):
utotbl.select(0, tocntr).fill_(tocntr)
return tbl
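# Illustrative sketch of building a connection table with the helpers above;
# the sizes are placeholders:
#
#     conn = SpatialConvolutionMap.maps.full(3, 16)  # every input plane feeds every output plane
#     conv = SpatialConvolutionMap(conn, kW=5, kH=5)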
def __init__(self, conMatrix, kW, kH, dW=1, dH=1):
super(SpatialConvolutionMap, self).__init__()
self.kW = kW
self.kH = kH
self.dW = dW
self.dH = dH
self.connTable = conMatrix
self.nInputPlane = int(self.connTable.select(1, 0).max()) + 1
self.nOutputPlane = int(self.connTable.select(1, 1).max()) + 1
self.weight = torch.Tensor(self.connTable.size(0), kH, kW)
self.bias = torch.Tensor(self.nOutputPlane)
self.gradWeight = torch.Tensor(self.connTable.size(0), kH, kW)
self.gradBias = torch.Tensor(self.nOutputPlane)
self.reset()
def reset(self, stdv=None):
if stdv is not None:
stdv = stdv * math.sqrt(3)
self.weight.uniform_(-stdv, stdv)
self.bias.uniform_(-stdv, stdv)
else:
ninp = torch.Tensor(self.nOutputPlane).zero_()
for i in range(self.connTable.size(0)):
idx = int(self.connTable[i, 1])
ninp[idx] += 1
for k in range(self.connTable.size(0)):
idx = int(self.connTable[k, 1])
stdv = 1. / math.sqrt(self.kW * self.kH * ninp[idx])
self.weight.select(0, k).uniform_(-stdv, stdv)
for k in range(self.bias.size(0)):
stdv = 1. / math.sqrt(self.kW * self.kH * ninp[k])
# TODO: torch.uniform
self.bias[k] = random.uniform(-stdv, stdv)
def updateOutput(self, input):
self._backend.SpatialConvolutionMap_updateOutput(
self._backend.library_state,
input,
self.output,
self.weight,
self.bias,
self.connTable,
self.nInputPlane,
self.nOutputPlane,
self.dW, self.dH
)
return self.output
def updateGradInput(self, input, gradOutput):
self._backend.SpatialConvolutionMap_updateGradInput(
self._backend.library_state,
input,
gradOutput,
self.gradInput,
self.weight,
self.bias,
self.connTable,
self.nInputPlane,
self.nOutputPlane,
self.dW, self.dH
)
return self.gradInput
def accGradParameters(self, input, gradOutput, scale=1):
self._backend.SpatialConvolutionMap_accGradParameters(
self._backend.library_state,
input,
gradOutput,
self.gradWeight,
self.gradBias,
self.connTable,
self.nInputPlane,
self.nOutputPlane,
self.dW, self.dH,
scale
)
| [
"[email protected]"
] | |
8a8c83115058802253bdc23bf2c6e0f87b1dce74 | d134ab3ea473ed01c05d4be70734f2fdb6e257c6 | /tensorflow/tools/compatibility/tf_upgrade_v2_test.py | 4460ad5182ac56ea7e7ccb6d5a134dc244200315 | [
"Apache-2.0"
] | permissive | tp46/tensorflow | 2ff6b3d3b09c65bf2a8b6ae33cbb5ff94bf5e74d | 2a050766bf0556d7d92eea62d40fd2bebbcb399f | refs/heads/master | 2020-04-10T07:29:49.244741 | 2018-12-07T22:18:35 | 2018-12-07T22:27:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 26,473 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tf 2.0 upgrader."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
import tensorflow as tf
# OSS TF V2 import placeholder.
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test as test_lib
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_export
from tensorflow.python.util import tf_inspect
from tensorflow.tools.common import public_api
from tensorflow.tools.common import traverse
from tensorflow.tools.compatibility import ast_edits
from tensorflow.tools.compatibility import tf_upgrade_v2
_TENSORFLOW_API_ATTR_V1 = (
tf_export.API_ATTRS_V1[tf_export.TENSORFLOW_API_NAME].names)
_TENSORFLOW_API_ATTR = tf_export.API_ATTRS[tf_export.TENSORFLOW_API_NAME].names
_ESTIMATOR_API_ATTR_V1 = (
tf_export.API_ATTRS_V1[tf_export.ESTIMATOR_API_NAME].names)
_ESTIMATOR_API_ATTR = tf_export.API_ATTRS[tf_export.ESTIMATOR_API_NAME].names
def get_v1_names(symbol):
names_v1 = []
if hasattr(symbol, _TENSORFLOW_API_ATTR_V1):
names_v1.extend(getattr(symbol, _TENSORFLOW_API_ATTR_V1))
if hasattr(symbol, _ESTIMATOR_API_ATTR_V1):
names_v1.extend(getattr(symbol, _ESTIMATOR_API_ATTR_V1))
return names_v1
def get_v2_names(symbol):
names_v2 = set()
if hasattr(symbol, _TENSORFLOW_API_ATTR):
names_v2.update(getattr(symbol, _TENSORFLOW_API_ATTR))
if hasattr(symbol, _ESTIMATOR_API_ATTR):
names_v2.update(getattr(symbol, _ESTIMATOR_API_ATTR))
return list(names_v2)
def get_symbol_for_name(root, name):
name_parts = name.split(".")
symbol = root
# Iterate starting with second item since 1st item is "tf.".
for part in name_parts[1:]:
symbol = getattr(symbol, part)
return symbol
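# Example (illustrative): get_symbol_for_name(tf, "tf.nn.relu") walks the
# attribute path and returns tf.nn.relu.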
def get_func_and_args_from_str(call_str):
"""Parse call string to get function and argument names.
Args:
call_str: Call string must be in the form:
`tf.foo(arg1=val1, arg2=val2, ...)`.
Returns:
(function_name, list of arg names) tuple.
"""
open_paren_index = call_str.find("(")
close_paren_index = call_str.rfind(")")
function_name = call_str[:call_str.find("(")]
args = call_str[open_paren_index+1:close_paren_index].split(",")
args = [arg.split("=")[0].strip() for arg in args]
return function_name, args
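# Example (illustrative):
#     get_func_and_args_from_str("tf.foo(a=1, b=2)") == ("tf.foo", ["a", "b"])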
class TestUpgrade(test_util.TensorFlowTestCase):
"""Test various APIs that have been changed in 2.0.
We also test whether a converted file is executable. test_file_v1_10.py
aims to exhaustively test that API changes are convertible and actually
work when run with current TensorFlow.
"""
@classmethod
def setUpClass(cls):
cls.v2_symbols = {}
if not hasattr(tf.compat, "v2"):
return
def symbol_collector(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names_v2 = get_v2_names(attr)
for name in api_names_v2:
cls.v2_symbols["tf." + name] = attr
visitor = public_api.PublicAPIVisitor(symbol_collector)
traverse.traverse(tf.compat.v2, visitor)
def _upgrade(self, old_file_text):
in_file = six.StringIO(old_file_text)
out_file = six.StringIO()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
count, report, errors = (
upgrader.process_opened_file("test.py", in_file,
"test_out.py", out_file))
return count, report, errors, out_file.getvalue()
def testParseError(self):
_, report, unused_errors, unused_new_text = self._upgrade(
"import tensorflow as tf\na + \n")
self.assertTrue(report.find("Failed to parse") != -1)
def testReport(self):
text = "tf.assert_near(a)\n"
_, report, unused_errors, unused_new_text = self._upgrade(text)
# This is not a complete test, but it is a sanity test that a report
# is generating information.
self.assertTrue(report.find("Renamed function `tf.assert_near` to "
"`tf.debugging.assert_near`"))
def testRename(self):
text = "tf.conj(a)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.conj(a)\n")
text = "tf.rsqrt(tf.log_sigmoid(3.8))\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.math.rsqrt(tf.math.log_sigmoid(3.8))\n")
def testAllAPI(self):
if not hasattr(tf.compat, "v2"):
return
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v2 namespace.
# Please regenerate the renames file or edit any manual renames if this
# test fails.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = get_v1_names(attr)
for name in api_names:
_, _, _, text = self._upgrade("tf." + name)
if (text and
not text.startswith("tf.compat.v1") and
text not in self.v2_symbols):
self.assertFalse(
True, "Symbol %s generated from %s not in v2 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testAllAPIV1(self):
collect = True
v1_symbols = set([])
# Converts all symbols in the v1 namespace to the v2 namespace, raising
# an error if the target of the conversion is not in the v1 namespace.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
api_names = get_v1_names(attr)
for name in api_names:
if collect:
v1_symbols.add("tf." + name)
else:
_, _, _, text = self._upgrade("tf." + name)
if (text and
not text.startswith("tf.compat.v1") and
text not in v1_symbols):
self.assertFalse(
True, "Symbol %s generated from %s not in v1 API" % (
text, name))
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
collect = False
traverse.traverse(tf.compat.v1, visitor)
def testKeywordArgNames(self):
if not hasattr(tf.compat, "v2"):
return
all_keyword_renames = (
tf_upgrade_v2.TFAPIChangeSpec().function_keyword_renames)
v2_name_exceptions = {"verify_shape_is_now_always_true"}
# Visitor that verifies V1 argument names, converts to V2 and checks
# V2 argument names.
def conversion_visitor(unused_path, unused_parent, children):
for child in children:
_, attr = tf_decorator.unwrap(child[1])
names_v1 = get_v1_names(attr)
for name in names_v1:
name = "tf.%s" % name
if name not in all_keyword_renames:
continue
arg_names_v1 = tf_inspect.getargspec(attr)[0]
keyword_renames = all_keyword_renames[name]
self.assertEqual(type(keyword_renames), dict)
# Assert that v1 function has valid v1 argument names.
for from_name, _ in keyword_renames.items():
self.assertIn(
from_name, arg_names_v1,
"%s not found in %s arguments: %s" %
(from_name, name, str(arg_names_v1)))
# Assert that arg names after converting to v2 are present in
# v2 function.
# 1. First, create an input of the form:
# tf.foo(arg1=val1, arg2=val2, ...)
args = ",".join(
["%s=%d" % (from_name, from_index)
for from_index, from_name in enumerate(keyword_renames.keys())])
text_input = "%s(%s)" % (name, args)
# 2. Convert the input to V2.
_, _, _, text = self._upgrade(text_input)
new_function_name, new_args = get_func_and_args_from_str(text)
# 3. Verify V2 function and arguments.
# Note: If we rename arguments, new function must be available in 2.0.
# We should not be using compat.v1 in this case.
self.assertIn(new_function_name, self.v2_symbols)
args_v2 = tf_inspect.getargspec(self.v2_symbols[new_function_name])[0]
args_v2.extend(v2_name_exceptions)
for new_arg in new_args:
self.assertIn(new_arg, args_v2)
visitor = public_api.PublicAPIVisitor(conversion_visitor)
visitor.do_not_descend_map["tf"].append("contrib")
visitor.private_map["tf.compat"] = ["v1", "v2"]
traverse.traverse(tf.compat.v1, visitor)
def testReorderFileNeedsUpdate(self):
reordered_function_names = (
tf_upgrade_v2.TFAPIChangeSpec().reordered_function_names)
function_reorders = (
tf_upgrade_v2.TFAPIChangeSpec().function_reorders)
added_names_message = """Some function names in
self.reordered_function_names are not in reorders_v2.py.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
removed_names_message = """%s in self.reorders_v2 does not match
any name in self.reordered_function_names.
Please run the following commands to update reorders_v2.py:
bazel build tensorflow/tools/compatibility/update:generate_v2_reorders_map
bazel-bin/tensorflow/tools/compatibility/update/generate_v2_reorders_map
"""
self.assertTrue(
reordered_function_names.issubset(function_reorders),
added_names_message)
# function_reorders should contain reordered_function_names
# and their TensorFlow V1 aliases.
for name in function_reorders:
# get other names for this function
attr = get_symbol_for_name(tf.compat.v1, name)
_, attr = tf_decorator.unwrap(attr)
v1_names = get_v1_names(attr)
self.assertTrue(v1_names)
v1_names = ["tf.%s" % n for n in v1_names]
# check if any other name is in reordered_function_names
self.assertTrue(
any(n in reordered_function_names for n in v1_names),
removed_names_message % name)
def testRenameConstant(self):
text = "tf.MONOLITHIC_BUILD\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "tf.sysconfig.MONOLITHIC_BUILD\n")
text = "some_call(tf.MONOLITHIC_BUILD)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, "some_call(tf.sysconfig.MONOLITHIC_BUILD)\n")
def testRenameArgs(self):
text = ("tf.nn.pool(input_a, window_shape_a, pooling_type_a, padding_a, "
"dilation_rate_a, strides_a, name_a, data_format_a)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
("tf.nn.pool(input=input_a, window_shape=window_shape_a,"
" pooling_type=pooling_type_a, padding=padding_a, "
"dilations=dilation_rate_a, strides=strides_a, "
"name=name_a, data_format=data_format_a)\n"))
def testReorder(self):
text = "tf.boolean_mask(a, b, c, d)\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text,
"tf.boolean_mask(tensor=a, mask=b, name=c, axis=d)\n")
def testLearningRateDecay(self):
for decay in ["tf.train.exponential_decay",
"tf.train.polynomial_decay", "tf.train.natural_exp_decay",
"tf.train.inverse_time_decay", "tf.train.cosine_decay",
"tf.train.cosine_decay_restarts",
"tf.train.linear_cosine_decay",
"tf.train.noisy_linear_cosine_decay"]:
text = "%s(a, b)\n" % decay
_, report, errors, _ = self._upgrade(text)
self.assertEqual(errors, ["test.py:1: %s requires manual check." % decay])
self.assertIn("%s has been changed" % decay, report)
def testPiecewiseDecay(self):
text = "tf.train.piecewise_constant_decay(a, b)\n"
_, report, errors, _ = self._upgrade(text)
self.assertEqual(
errors,
["test.py:1: tf.train.piecewise_constant_decay requires manual check."])
self.assertIn("tf.train.piecewise_constant_decay has been changed", report)
def testEstimatorLossReductionChange(self):
classes = [
"LinearClassifier", "LinearRegressor", "DNNLinearCombinedClassifier",
"DNNLinearCombinedRegressor", "DNNRegressor", "DNNClassifier",
"BaselineClassifier", "BaselineRegressor"
]
for c in classes:
ns = "tf.estimator." + c
text = ns + "(a, b)"
_, report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, ["test.py:1: %s requires manual check." % ns])
self.assertIn("loss_reduction has been changed", report)
def testDropout(self):
text = "tf.nn.dropout(x, keep_prob, name=\"foo\")\n"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(
new_text,
"tf.nn.dropout(x, 1 - keep_prob, name=\"foo\")\n",
)
text = "tf.nn.dropout(x)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(new_text, text)
self.assertEqual(
errors,
["test.py:1: tf.nn.dropout requires manual check."]
)
def testCountNonZeroChanges(self):
text = (
"tf.math.count_nonzero(input_tensor=input, dtype=dtype, name=name, "
"reduction_indices=axis, keep_dims=keepdims)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.math.count_nonzero(input=input, dtype=dtype, name=name, "
"axis=axis, keepdims=keepdims)\n"
)
self.assertEqual(new_text, expected_text)
def testRandomMultinomialToRandomCategorical(self):
text = (
"tf.random.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
text = (
"tf.multinomial(logits, samples, seed, name, output_dtype)\n"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.random.categorical(logits=logits, num_samples=samples, seed=seed, "
"name=name, dtype=output_dtype)\n"
)
self.assertEqual(new_text, expected_text)
def testConvolutionOpUpdate(self):
text = (
"tf.nn.convolution(input, filter, padding, strides, dilation_rate, "
"name, data_format)"
)
_, unused_report, unused_errors, new_text = self._upgrade(text)
expected_text = (
"tf.nn.convolution(input=input, filters=filter, padding=padding, "
"strides=strides, dilations=dilation_rate, name=name, "
"data_format=data_format)"
)
self.assertEqual(new_text, expected_text)
def testColocateGradientsWithOps(self):
text = "tf.gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "tf.gradients(a, colocate_gradients_with_ops=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, ["test.py:1: tf.gradients requires manual check."])
text = "optimizer.minimize(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.minimize(a, colocate_gradients_with_ops=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors,
["test.py:1: Optimizer.minimize requires manual check."])
text = "optimizer.compute_gradients(a, foo=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors, [])
text = "optimizer.compute_gradients(a, colocate_gradients_with_ops=False)\n"
_, unused_report, errors, new_text = self._upgrade(text)
self.assertEqual(text, new_text)
self.assertEqual(errors,
["test.py:1: Optimizer.compute_gradients "
"requires manual check."])
def testExportSavedModelRename(self):
text = "self.est.export_savedmodel(path)"
_, report, unused_errors, unused_new_text = self._upgrade(text)
self.assertIn(
"rename the method export_savedmodel() to export_saved_model()",
report)
def testArgmin(self):
text = "tf.argmin(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmin(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmin(input, 0)"
expected_text = "tf.argmin(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_min(input, 0)"
expected_text = "tf.argmin(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testArgmax(self):
text = "tf.argmax(input, name=n, dimension=1, output_type=type)"
expected_text = "tf.argmax(input=input, name=n, axis=1, output_type=type)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.argmax(input, 0)"
expected_text = "tf.argmax(input=input, axis=0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.arg_max(input, 0)"
expected_text = "tf.argmax(input, 0)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testBatchToSpace(self):
text = "tf.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.batch_to_space(input, crops, block_size, name)"
expected_text = (
"tf.batch_to_space(input=input, crops=crops, block_shape=block_size, "
"name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.manip.batch_to_space_nd(input, block_shape, crops, name)"
expected_text = "tf.batch_to_space(input, block_shape, crops, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testExtractImagePatches(self):
text = (
"tf.extract_image_patches(images, ksizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
expected_text = (
"tf.image.extract_image_patches(images, sizes=ksizes, strides=strides,"
"rates=rates, padding=padding, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testStatelessMultinomial(self):
text = (
"tf.random.stateless_multinomial(logits, num_samples, seed, "
"output_dtype=dtype, name=name)")
expected_text = (
"tf.random.stateless_categorical(logits, num_samples, seed, "
"dtype=dtype, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSoftMaxCrossEntropyWithLogitsV2(self):
text = "tf.nn.softmax_cross_entropy_with_logits_v2(labels, logits, dim=2)"
expected_text = (
"tf.nn.softmax_cross_entropy_with_logits(labels, logits, axis=2)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseMatmul(self):
text = ("tf.sparse_matmul(a, b, c, d, e, f, g)\n")
expected_text = ("tf.linalg.matmul(a=a, b=b, transpose_a=c, transpose_b=d, "
"a_is_sparse=e, b_is_sparse=f, name=g)\n")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testWeightedMoments(self):
text = "tf.nn.weighted_moments(x, axes, freq, name, kd)"
expected_text = (
"tf.nn.weighted_moments(x=x, axes=axes, frequency_weights=freq, "
"name=name, keep_dims=kd)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseAdd(self):
text = "tf.sparse.add(a, b, t)"
expected_text = "tf.sparse.add(a=a, b=b, threshold=t)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSparseConcat(self):
text = "tf.sparse.concat(ax, inp, name, exp, concat)"
expected_text = (
"tf.sparse.concat(axis=ax, sp_inputs=inp, name=name, "
"expand_nonconcat_dim=exp, axis=concat)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSeparableConv2D(self):
text = "tf.nn.separable_conv2d(inp, d, pt, strides, pad, rate, name, fmt)"
expected_text = (
"tf.nn.separable_conv2d(input=inp, depthwise_filter=d, "
"pointwise_filter=pt, strides=strides, padding=pad, "
"dilations=rate, name=name, data_format=fmt)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpacetoBatch(self):
text = "tf.space_to_batch_nd(input, shape, paddings, name)"
expected_text = "tf.space_to_batch(input, shape, paddings, name)"
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
text = "tf.nn.space_to_batch(input, paddings, block_size, name)"
expected_text = (
"tf.space_to_batch(input=input, paddings=paddings, "
"block_size=block_size, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testInTopK(self):
text = "tf.math.in_top_k(a, b, c, n)"
expected_text = (
"tf.math.in_top_k(predictions=a, targets=b, k=c, name=n)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testDepthToSpace(self):
text = "tf.nn.depth_to_space(input, block_size, name, data_format)"
expected_text = (
"tf.nn.depth_to_space(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookup(self):
text = ("tf.nn.embedding_lookup(params, ids, partition_strategy, name, "
"validate_indices, max_norm)")
expected_text = ("tf.nn.embedding_lookup(params=params, ids=ids, "
"partition_strategy=partition_strategy, name=name, "
"validate_indices=validate_indices, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testEmbeddingLookupSparse(self):
text = ("tf.nn.embedding_lookup_sparse(params, sp_ids, sp_weights, "
"partition_strategy, name, combiner, max_norm)")
expected_text = ("tf.nn.embedding_lookup_sparse(params=params, "
"sp_ids=sp_ids, sp_weights=sp_weights, "
"partition_strategy=partition_strategy, name=name, "
"combiner=combiner, max_norm=max_norm)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testNnInTopK(self):
text = "tf.nn.in_top_k(predictions, targets, k, name)"
expected_text = ("tf.nn.in_top_k(predictions=predictions, "
"targets=targets, k=k, name=name)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
def testSpaceToDepth(self):
text = "tf.nn.space_to_depth(input, block_size, name, data_format)"
expected_text = ("tf.nn.space_to_depth(input=input, block_size=block_size, "
"name=name, data_format=data_format)")
_, unused_report, unused_errors, new_text = self._upgrade(text)
self.assertEqual(new_text, expected_text)
class TestUpgradeFiles(test_util.TensorFlowTestCase):
def testInplace(self):
"""Check to make sure we don't have a file system race."""
temp_file = tempfile.NamedTemporaryFile("w", delete=False)
original = "tf.conj(a)\n"
upgraded = "tf.math.conj(a)\n"
temp_file.write(original)
temp_file.close()
upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
upgrader.process_file(temp_file.name, temp_file.name)
self.assertAllEqual(open(temp_file.name).read(), upgraded)
os.unlink(temp_file.name)
if __name__ == "__main__":
test_lib.main()
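# Added usage note (hedged, not part of the original test module): outside of
# these unit tests, the same conversion is typically driven either through the
# tf_upgrade_v2 command-line script shipped with TensorFlow 2.x, e.g.
#   tf_upgrade_v2 --infile old_model.py --outfile old_model_v2.py
# or programmatically, as testInplace does above:
#   upgrader = ast_edits.ASTCodeUpgrader(tf_upgrade_v2.TFAPIChangeSpec())
#   upgrader.process_file("old_model.py", "old_model_v2.py")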
| [
"[email protected]"
] | |
e2a338049711a6ad6960e1c4ee5c4c2dfd2710f1 | 12c18d9d0b210698f61f70f458a46647fc99356c | /p57.py | 262e70bded90556b2b859ec765ea7307ee71e68f | [] | no_license | ddgvv/dd | 503a64b08e12de79b5812fd39a76c826aadbd06c | 2e07d92123960e9625198a454ad3973671761fda | refs/heads/master | 2021-07-04T16:40:01.904529 | 2019-05-17T08:10:30 | 2019-05-17T08:10:30 | 110,428,294 | 1 | 10 | null | null | null | null | UTF-8 | Python | false | false | 63 | py | string,c=input().split(' ')
place=string.count(c)
print(place)
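# Added example: for the input "programming m" the script prints 2, i.e. the
# number of times the character c occurs in the first string.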
| [
"[email protected]"
] | |
7fc5fc8829de1dcf123347fcd4404cc65ca1a795 | 2c68f9156087d6d338373f9737fee1a014e4546b | /src/privatedns/azext_privatedns/vendored_sdks/models/aaaa_record_py3.py | 2cd662761ff9da8073fe44d4aa1f2ee418445ab3 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | anpaz/azure-cli-extensions | 8b0d4071c49840da9883f13cb0fd1f4515246ee0 | 847fd487fe61e83f2a4163a9393edc9555267bc2 | refs/heads/master | 2023-04-23T17:22:53.427404 | 2021-01-29T17:48:28 | 2021-01-29T18:01:33 | 257,394,204 | 2 | 0 | MIT | 2021-01-28T10:31:07 | 2020-04-20T20:19:43 | Python | UTF-8 | Python | false | false | 918 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class AaaaRecord(Model):
"""An AAAA record.
:param ipv6_address: The IPv6 address of this AAAA record.
:type ipv6_address: str
"""
_attribute_map = {
'ipv6_address': {'key': 'ipv6Address', 'type': 'str'},
}
def __init__(self, *, ipv6_address: str=None, **kwargs) -> None:
super(AaaaRecord, self).__init__(**kwargs)
self.ipv6_address = ipv6_address
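# Hedged usage sketch (added; assumes the standard msrest Model interface used
# by this generated SDK):
#   record = AaaaRecord(ipv6_address='2001:db8::1')
#   record.serialize()  # expected to yield {'ipv6Address': '2001:db8::1'}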
| [
"[email protected]"
] | |
1fafa6db558c6073910962a1fd4bfd7d3019592a | a00ec9691cd5e56db93d290cb7a7b1edfef6437f | /eco/eco/asgi.py | 2059a74dc9da807b96848d839ceb20e1570e7a08 | [] | no_license | pronob1010/Ogani | 7e6dfd424a830bd3355cf07f7a01eb521d8716d4 | f6ff3dc8ed40ce46a993d1f83e122fbbc61836cf | refs/heads/master | 2022-10-24T06:22:44.044064 | 2020-06-21T22:01:08 | 2020-06-21T22:01:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 383 | py | """
ASGI config for eco project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'eco.settings')
application = get_asgi_application()
| [
"[email protected]"
] | |
fce765142eaf423639434d0ee0255320be03ebd1 | e0c8662a56d89730043146ddc340e9e0b9f7de72 | /plugin/11e23c18-1596.py | 3ca27f59f5378a6af4db567a8641f9832a2735e3 | [] | no_license | izj007/bugscan_poc | f2ef5903b30b15c230b292a1ff2dc6cea6836940 | 4490f3c36d4033bdef380577333722deed7bc758 | refs/heads/master | 2020-09-22T17:20:50.408078 | 2019-01-18T09:42:47 | 2019-01-18T09:42:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 643 | py | #coding:utf-8
from lib.curl import *
# -*- coding: utf-8 -*-
import re
def assign(service, arg):
if service=='tipask':
return True,arg
def audit(arg):
code,head,res,errcode, _=curl.curl2(arg+'?dfferfdsfe')
if code == 404 and res:
m=re.search(r'file "(.*)" not found',res)
if m:
security_info('Path:'+','.join(m.groups()))
if __name__=='__main__':
from dummy import *
audit(assign('tipask','http://ask.id028.cn/')[1])
audit(assign('tipask','http://ask.ccun.cn/')[1])
audit(assign('tipask','http://ask.paotuitu.cn/')[1])
audit(assign('tipask','http://wenda.fanmimi.com/')[1]) | [
"[email protected]"
] | |
12769f9a834726649374d31c469d956ec8091f46 | 5cb9dccbcccb8a2137368dd0615fe3e3c7761707 | /simulations/kinova/build/chomp_motion_planner/catkin_generated/pkg.develspace.context.pc.py | 3a3076474a22a84958b84ab31ec82194d9ffd63e | [] | no_license | Simon-Steinmann/sim2real-modular-RL-project | b2467a393014e106043f6128a026f5eac934a83d | 4027590ac94de2d5c914731c09efcf2f318b9ca3 | refs/heads/master | 2020-07-29T01:30:56.450919 | 2019-10-12T09:33:00 | 2019-10-12T09:33:00 | 209,605,548 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 672 | py | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/home/acis/sim2real/simulations/kinova/src/moveit/moveit_planners/chomp/chomp_motion_planner/include".split(';') if "/home/acis/sim2real/simulations/kinova/src/moveit/moveit_planners/chomp/chomp_motion_planner/include" != "" else []
PROJECT_CATKIN_DEPENDS = "".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lchomp_motion_planner".split(';') if "-lchomp_motion_planner" != "" else []
PROJECT_NAME = "chomp_motion_planner"
PROJECT_SPACE_DIR = "/home/acis/sim2real/simulations/kinova/devel/.private/chomp_motion_planner"
PROJECT_VERSION = "1.0.1"
| [
"[email protected]"
] | |
273a796eb3e0d568fc83cd05aff1e374c4393f21 | b1bc2e54f8cd35c9abb6fc4adb35b386c12fe6b4 | /toontown/src/coghq/Stomper.py | f579b1ff2fd2e11404c53d4ef459346b72bf19ad | [] | no_license | satire6/Anesidora | da3a44e2a49b85252b87b612b435fb4970469583 | 0e7bfc1fe29fd595df0b982e40f94c30befb1ec7 | refs/heads/master | 2022-12-16T20:05:13.167119 | 2020-09-11T16:58:04 | 2020-09-11T17:02:06 | 294,751,966 | 89 | 32 | null | null | null | null | UTF-8 | Python | false | false | 4,816 | py | """Stomper module: contains the Stomper class"""
from pandac.PandaModules import *
from direct.interval.IntervalGlobal import *
from direct.distributed.ClockDelta import *
from direct.showbase.PythonUtil import lerp
from direct.fsm import StateData
import math
class Stomper(StateData.StateData, NodePath):
SerialNum = 0
MotionLinear = 0
MotionSinus = 1
MotionHalfSinus = 2
DefaultStompSound = 'phase_5/audio/sfx/AA_drop_safe.mp3'
def __init__(self,
model,
range=5., # range of motion in feet along Z-axis
period=1., # duration of full cycle
phaseShift=0., # 0..1 phase shift
zOffset=0., # how close the stomper should get to Z=0
motionType=None,
shadow=None,
sound=None,
soundLen=None,
):
StateData.StateData.__init__(self, 'StomperDone')
self.SerialNum = Stomper.SerialNum
Stomper.SerialNum += 1
# get the stomp sound
self.sound = sound
self.soundLen = soundLen
if self.sound is not None:
self.sound = base.loadSfx(sound)
self.motionType = motionType
if self.motionType is None:
self.motionType = Stomper.MotionSinus
node = hidden.attachNewNode('Stomper%s' % self.SerialNum)
NodePath.__init__(self, node)
self.model = model.copyTo(self)
self.shadow = shadow
if shadow is not None:
self.shadow = shadow.copyTo(self)
self.shadow.setPos(0,0,.2)
self.TaskName = 'Stomper%sTask' % self.SerialNum
self.range = range
self.zOffset = zOffset
self.period = period
self.phaseShift = phaseShift
def destroy(self):
self.removeNode()
def enter(self, startTime):
# stomper should hit at t=0
if self.motionType is Stomper.MotionLinear:
motionIval = Sequence(
LerpPosInterval(self.model, self.period/2.,
Point3(0,0,self.zOffset+self.range),
startPos=Point3(0,0,self.zOffset)),
WaitInterval(self.period/4.),
LerpPosInterval(self.model, self.period/4.,
Point3(0,0,self.zOffset),
startPos=Point3(0,0,self.zOffset+self.range)),
)
elif self.motionType is Stomper.MotionSinus:
def sinusFunc(t, self=self):
# t: 0..1
# cos(pi) == -1 (hit/down)
# theta: pi..3*pi
theta = math.pi + (t * 2.*math.pi)
# c: -1..1
c = math.cos(theta)
# z: 0..self.range
self.model.setZ(self.zOffset +
((.5 + (c*.5)) * self.range))
motionIval = Sequence(
LerpFunctionInterval(sinusFunc, duration=self.period),
)
elif self.motionType is Stomper.MotionHalfSinus:
def halfSinusFunc(t, self=self):
# t: 0..1
self.model.setZ(self.zOffset +
(math.sin(t * math.pi) * self.range))
motionIval = Sequence(
LerpFunctionInterval(halfSinusFunc, duration=self.period),
)
# put the motion interval into a Parallel so that we can easily add
# concurrent ivals on (like sound, etc)
self.ival = Parallel(
motionIval,
name='Stomper%s' % self.SerialNum,
)
# 'stomp' sound
if self.sound is not None:
# make sure we don't play a sound that's too long; cap the
# sound length to the motion period
if self.soundLen is None:
sndDur = motionIval.getDuration()
else:
sndDur = min(self.soundLen, motionIval.getDuration())
self.ival.append(
SoundInterval(self.sound, duration=sndDur, node=self))
# shadow
if self.shadow is not None:
def adjustShadowScale(t, self=self):
# scale the shadow according to the position of the
# stomper
modelZ = self.model.getZ()
# a=0..1, 0=down, 1=up
a = modelZ/self.range
self.shadow.setScale(lerp(.7, 1., (1.-a)))
self.ival.append(
LerpFunctionInterval(adjustShadowScale, duration=self.period))
self.ival.loop()
self.ival.setT((globalClock.getFrameTime() - startTime) +
(self.period * self.phaseShift))
def exit(self):
self.ival.finish()
del self.ival
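# Hedged usage sketch (added): requires a running Panda3D ShowBase so that
# `loader`, `render` and `globalClock` exist; the model path is hypothetical.
#   stomperModel = loader.loadModel('phase_9/models/cogHQ/stomper')  # hypothetical asset
#   stomper = Stomper(stomperModel, range=8., period=2., motionType=Stomper.MotionSinus)
#   stomper.reparentTo(render)
#   stomper.enter(globalClock.getFrameTime())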
| [
"[email protected]"
] | |
81836a77393b703a7e69351661f816b5e0db1eb9 | 5f1c3a2930b20c3847496a249692dc8d98f87eee | /Pandas/Titanic_DataAnalysis/Question8.py | d6bea604e6b82e0b17af8e7a63fd757c6a04672e | [] | no_license | AmbyMbayi/CODE_py | c572e10673ba437d06ec0f2ae16022d7cbe21d1c | 5369abf21a8db1b54a5be6cbd49432c7d7775687 | refs/heads/master | 2020-04-24T05:01:46.277759 | 2019-02-22T08:26:04 | 2019-02-22T08:26:04 | 171,723,155 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 341 | py | """write a pandas program tocreate a pivot table and count survival by gender,
category-wise age bands of the various passenger classes
"""
import pandas as pd
import numpy as np
df = pd.read_csv('titanic.csv')
age = pd.cut(df['age'], [0,10,30,60,80])
result = df.pivot_table('survived', index=['sex', age], columns='pclass', aggfunc='count')
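# Added note: `age` holds the bands (0, 10], (10, 30], (30, 60] and (60, 80],
# so `result` counts the non-null 'survived' entries per (sex, age band) group,
# with one column per passenger class.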
print(result) | [
"[email protected]"
] | |
8b00cb8d70c9d7a361ad920dddfbfdf75d237328 | e59e0bd3cdfc706105ee697cc6683554d3cdb371 | /utime/augmentation/elastic_deformation.py | 9c267b66b0bdb1de51ca1870c185892338355305 | [
"MIT"
] | permissive | jennynanap/U-Time | 02455307cd67abf975d659346d4a1f3acebd8eed | f7c8e3f1368f43226872a69b0fbb8c29990e4bd9 | refs/heads/master | 2023-07-10T16:26:23.648995 | 2021-08-24T12:09:30 | 2021-08-24T12:09:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,570 | py | import numpy as np
from mpunet.interpolation import RegularGridInterpolator
from scipy.ndimage.filters import gaussian_filter
def elastic_transform(signal, labels, alpha, sigma, bg_value=0.0):
"""
Elastic deformation for 1D signals, modified from:
[Simard2003] Simard, Steinkraus and Platt, "Best Practices for
Convolutional Neural Networks applied to Visual Document Analysis", in
Proc. of the International Conference on Document Analysis and
Recognition, 2003.
Modified from:
https://gist.github.com/chsasank/4d8f68caf01f041a6453e67fb30f8f5a
Deforms both the signal and labels if len(labels) == len(signal)
Signal linearly interpolated
Labels nearest neighbour interpolated
"""
assert signal.ndim in (1, 2, 3)
org_sig_shape = signal.shape
org_lab_shape = labels.shape
if signal.ndim == 3:
signal = signal.reshape(-1, signal.shape[-1])
labels = labels.reshape(-1, 1)
elif signal.ndim == 1:
signal = np.expand_dims(signal, axis=-1)
seg_length = signal.shape[0]
channels = signal.shape[1]
dtype = signal.dtype
# Define coordinate system
coords = (np.arange(seg_length),)
# Initialize interpolators
intrps = []
for i in range(channels):
intrps.append(RegularGridInterpolator(coords, signal[:, i],
method="linear",
bounds_error=False,
fill_value=bg_value,
dtype=np.float32))
# Get random elastic deformations
dx = gaussian_filter((np.random.rand(seg_length) * 2 - 1), sigma,
mode="constant", cval=0.) * alpha
# Define sample points
indices = np.reshape(coords[0] + dx, (-1, 1))
# Interpolate all signal channels
signal = np.empty(shape=signal.shape, dtype=dtype)
for i, intrp in enumerate(intrps):
signal[:, i] = intrp(indices)
# Interpolate labels if passed, only if same shape as input
if labels is not None and len(labels) == len(signal):
lab_intrp = RegularGridInterpolator(coords, labels,
method="nearest",
bounds_error=False,
fill_value=0,
dtype=np.uint8)
labels = lab_intrp(indices).astype(labels.dtype)
return signal.reshape(org_sig_shape), labels.reshape(org_lab_shape)
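# Hedged example (added): a minimal self-test on a synthetic 1-D signal. The
# alpha/sigma values are illustrative only, and running it requires the mpunet
# interpolation dependency imported above.
if __name__ == "__main__":
    sig = np.sin(np.linspace(0, 8 * np.pi, 3000)).astype(np.float32)
    lab = (sig > 0).astype(np.uint8)
    warped_sig, warped_lab = elastic_transform(sig, lab, alpha=60.0, sigma=20.0)
    print(warped_sig.shape, warped_lab.shape)  # both (3000,)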
| [
"[email protected]"
] | |
88f954cd4c147c22b62406dd9e1848a41d7bfac9 | 675d1ad3ebb58d6bf177430568bb35a7319ce30b | /plotters/limits.py | 0eab4cdcf3c08d0721d27c24e9679c39f23d5596 | [] | no_license | kdlong/InitialStateAnalysis | 30a0382098014087982059908c2f74ccaff0c3d9 | 2a1f7275de6bcbcf53127ad2e949b20d2443e8bc | refs/heads/master | 2021-01-18T07:02:12.114624 | 2015-04-14T16:27:52 | 2015-04-14T16:27:52 | 30,890,597 | 0 | 0 | null | 2015-02-16T22:02:24 | 2015-02-16T22:02:23 | Python | UTF-8 | Python | false | false | 4,461 | py | '''
Class for plotting limits.
'''
import sys
import os
import errno
import numpy as np
import CMS_lumi, tdrstyle
from plotUtils import _3L_MASSES, _4L_MASSES, python_mkdir
sys.argv.append('-b')
import ROOT
sys.argv.pop()
ROOT.PyConfig.IgnoreCommandLineOptions = True
ROOT.gROOT.SetBatch(ROOT.kTRUE)
ROOT.gROOT.ProcessLine("gErrorIgnoreLevel = 1001;")
tdrstyle.setTDRStyle()
def save(savename,saveDir,canvas):
'''Save the limits into root file and images.'''
#for type in ['png', 'pdf', 'eps']:
for type in ['png']:
name = "%s/%s/%s.%s" % (saveDir, type, savename, type)
python_mkdir(os.path.dirname(name))
canvas.Print(name)
#canvas.SetName(savename)
#savefile.WriteTObject(self.canvas)
#canvas.Clear()
def plot_limits(analysis, period, savename, **kwargs):
'''Plot limits and get exclusion limits'''
datacardBaseDir = kwargs.pop('datacardBaseDir','datacards')
limitDataBaseDir = kwargs.pop('limitDataBaseDir','limitData')
saveDir = kwargs.pop('saveDir','plots/limits')
blind = kwargs.pop('blind',True)
bp = kwargs.pop('branchingPoint','')
datacardDir = '%s/%s_%itev' % (datacardBaseDir, analysis, period)
if bp: datacardDir += '/%s' % bp
limitDataDir = '%s/%s_%itev' % (limitDataBaseDir, analysis, period)
if bp: limitDataDir += '/%s' % bp
masses = _3L_MASSES if analysis == 'Hpp3l' else _4L_MASSES
if period==13: masses = [500]
quartiles = np.empty((6, len(masses)), dtype=float)
for j, mass in enumerate(masses):
fname = os.path.join(limitDataDir, "higgsCombineTest.Asymptotic.mH%i.root" % mass)
file = ROOT.TFile(fname,"READ")
tree = file.Get("limit")
if not tree: continue
for i, row in enumerate(tree):
quartiles[i,j] = row.limit
n = len(masses)
twoSigma = ROOT.TGraph(2*n)
oneSigma = ROOT.TGraph(2*n)
expected = ROOT.TGraph(n)
if not blind: observed = ROOT.TGraph(n)
for i, mass in enumerate(masses):
twoSigma.SetPoint(i,masses[i],quartiles[4][i])
twoSigma.SetPoint(n+i,masses[n-i-1],quartiles[0][n-i-1])
oneSigma.SetPoint(i,masses[i],quartiles[3][i])
oneSigma.SetPoint(n+i,masses[n-i-1],quartiles[1][n-i-1])
expected.SetPoint(i,masses[i],quartiles[2][i])
if not blind: observed.SetPoint(i,masses[i],quartiles[5][i])
twoSigma.SetFillColor(ROOT.EColor.kYellow)
twoSigma.SetLineColor(ROOT.EColor.kYellow)
twoSigma.SetMarkerStyle(0)
oneSigma.SetFillColor(ROOT.EColor.kSpring)
oneSigma.SetLineColor(ROOT.EColor.kSpring)
oneSigma.SetMarkerStyle(0)
expected.SetLineStyle(7)
expected.SetMarkerStyle(0)
expected.SetFillStyle(0)
if not blind:
observed.SetMarkerStyle(0)
observed.SetFillStyle(0)
canvas = ROOT.TCanvas('c%s'%bp,'c%s'%bp,50,50,800,600)
canvas.SetFillColor(0)
canvas.SetBorderMode(0)
canvas.SetFrameFillStyle(0)
canvas.SetFrameBorderMode(0)
canvas.SetLeftMargin(0.12)
canvas.SetRightMargin(0.04)
canvas.SetTopMargin(0.08)
canvas.SetBottomMargin(0.12)
canvas.SetLogy(1)
expected.GetXaxis().SetLimits(masses[0],masses[-1])
expected.GetXaxis().SetTitle('#Phi^{++} Mass (GeV)')
expected.GetYaxis().SetTitle('95% CLs Upper Limit on #sigma/#sigma_{SM}')
expected.GetYaxis().SetTitleOffset(1.)
expected.GetYaxis().SetTitleSize(0.05)
twoSigma.Draw('f')
oneSigma.Draw('f')
expected.Draw()
if not blind: observed.Draw()
ratiounity = ROOT.TLine(expected.GetXaxis().GetXmin(),1,expected.GetXaxis().GetXmax(),1)
ratiounity.Draw()
legend = ROOT.TLegend(0.65,0.2,0.90,0.4)
legend.SetFillColor(0)
if not blind: legend.AddEntry(observed, 'Observed')
legend.AddEntry(expected, 'Expected')
legend.AddEntry(twoSigma, 'Expected 2#sigma', 'F')
legend.AddEntry(oneSigma, 'Expected 1#sigma', 'F')
legend.Draw('same')
lumiperiod = 2 if period == 8 else 4
    CMS_lumi.writeExtraText = True
CMS_lumi.extraText = "Preliminary" if not blind else "Simulation Preliminary"
CMS_lumi.lumi_7TeV = "%0.1f fb^{-1}" % (4.9)
CMS_lumi.lumi_8TeV = "%0.1f fb^{-1}" % (19.7)
CMS_lumi.lumi_13TeV = "%0.1f fb^{-1}" % (25.0)
CMS_lumi.CMS_lumi(canvas,lumiperiod,11)
save(savename,saveDir,canvas)
y = 0
for x in range(masses[0],masses[-1]):
y = expected.Eval(x)
if y > 1: break
print "Expected Limit: %i GeV" % x
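# Hedged usage sketch (added): the analysis name, period and branching point
# below are illustrative; the datacard/limit directories must already contain
# the corresponding combine outputs for these mass points.
#   plot_limits('Hpp3l', 8, 'limits_Hpp3l_8tev_ee', branchingPoint='ee', blind=True)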
| [
"[email protected]"
] | |
2375f031e40a31c4eadc37b25c1a2e45e111c9bd | 9e988c0dfbea15cd23a3de860cb0c88c3dcdbd97 | /sdBs/AllRun/sdssj9-10_104054.47+112023.7/sdB_sdssj9-10_104054.47+112023.7_coadd.py | 4de1d11e6ffb1bc19dfcb19efea75a26b321efa2 | [] | no_license | tboudreaux/SummerSTScICode | 73b2e5839b10c0bf733808f4316d34be91c5a3bd | 4dd1ffbb09e0a599257d21872f9d62b5420028b0 | refs/heads/master | 2021-01-20T18:07:44.723496 | 2016-08-08T16:49:53 | 2016-08-08T16:49:53 | 65,221,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | from gPhoton.gMap import gMap
def main():
gMap(band="NUV", skypos=[160.226958,11.339917], skyrange=[0.0333333333333,0.0333333333333], stepsz = 30., cntfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdBs/sdB_sdssj9-10_104054.47+112023.7/sdB_sdssj9-10_104054.47+112023.7_movie_count.fits", cntcoaddfile="/data2/fleming/GPHOTON_OUTPUT/LIGHTCURVES/sdB/sdB_sdssj9-10_104054.47+112023.7/sdB_sdssj9-10_104054.47+112023.7_count_coadd.fits", overwrite=True, verbose=3)
if __name__ == "__main__":
main()
| [
"[email protected]"
] | |
c0c28a81cd864c47e81a5589f67dc159c3c4086b | 151dc9e2f00b202a5085189ac5e5df06eed1b77a | /bot/migrations/0002_food_ingredients.py | 6a59f2980a0f2a0dc788dbb431c459bb5c62c5f5 | [] | no_license | ochui/foodbot | 074d818308ee72c895857641333a71aebe6f36c0 | c4dcda9a94a270ca26a58383d3719ed312b06907 | refs/heads/master | 2023-08-25T10:06:22.793546 | 2021-11-02T23:22:03 | 2021-11-02T23:22:03 | 420,014,796 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.2.8 on 2021-10-22 09:24
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('bot', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='food',
name='ingredients',
field=models.ManyToManyField(to='bot.Ingredient'),
),
]
| [
"[email protected]"
] | |
2690394c577796e63ca8242a797e503b406ccb1a | a665936ae4c630ae9ef4c8b106aef1d0dcb3309b | /pro57.py | d63d9a4c653a8a0a3d28728a19a171d8b6b580fc | [] | no_license | Hemavarsheni/codekata | bbbd54e0282c0a5724ef58f6abd215cb57fe0cd8 | 577c8347991784d1b9b1b4364045cde3257ee9db | refs/heads/master | 2020-06-13T23:50:44.237011 | 2019-08-16T12:11:29 | 2019-08-16T12:11:29 | 194,827,236 | 0 | 10 | null | null | null | null | UTF-8 | Python | false | false | 343 | py | #Hemavarsheni
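# Added note: reads two space-separated strings and reports "yes" if they
# share at least one two-character substring (bigram), otherwise "no".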
a,b=map(str,input().split())
lista=[]
listb=[]
for i in range(len(a)-1):
s=""
for j in range(i,i+2):
s=s+a[j]
lista.append(s)
for i in range(len(b)-1):
s=""
for j in range(i,i+2):
s=s+b[j]
listb.append(s)
for i in lista:
if i in listb:
print("yes")
exit(0)
print("no")
| [
"[email protected]"
] | |
5ef239b0126778ed8d7aeba3dc594138312d9df0 | 9f66941e73cad0c215601339512b8dd6729792da | /Show_Program_exe/predictModelUI/tensorflow/_api/v2/profiler/experimental/client/__init__.py | 6c373c04418802ba602b7d8a351fd76131351e7e | [] | no_license | NgaAdrain/TEAM_Enter_Text_Here | eb0e02c13959b90eecc0c69d2b24adb23a50150a | a7217438284360e06c93d37092dca1afcecb735a | refs/heads/master | 2023-07-20T11:42:50.353497 | 2021-05-28T08:30:53 | 2021-05-28T08:30:53 | 267,983,495 | 2 | 0 | null | 2023-07-06T22:00:36 | 2020-05-30T00:50:59 | Python | UTF-8 | Python | false | false | 419 | py | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.profiler.experimental.client namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.profiler.profiler_client import monitor
from tensorflow.python.profiler.profiler_client import trace
del _print_function
| [
"[email protected]"
] | |
469233826fa76271f9d1c92d61ca030c6ebe3463 | ac191a3e973b0d66baa559fdd27af5e0012fe67b | /shipyard/utils.py | eab1698afc173b87779a9d3d0ce8a92c8445ae1d | [
"LicenseRef-scancode-warranty-disclaimer",
"Apache-2.0"
] | permissive | monokrome/shipyard | fb2ab707643e52c3acfd20dfaf4d269a8b5dc195 | 3a37105466e4ac30b260a6b40a467e0412b4fc13 | refs/heads/master | 2022-01-17T23:31:26.186270 | 2013-10-23T06:29:21 | 2013-10-23T06:29:21 | 13,798,136 | 1 | 0 | null | 2022-01-06T22:53:10 | 2013-10-23T09:02:08 | Python | UTF-8 | Python | false | false | 2,347 | py | # Copyright 2013 Evan Hazlett and contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansi2html import Ansi2HTMLConverter
from django.conf import settings
import redis
def get_short_id(container_id):
return container_id[:12]
def convert_ansi_to_html(text, full=False):
converted = ''
try:
conv = Ansi2HTMLConverter(markup_lines=True, linkify=False, escaped=False)
converted = conv.convert(text.replace('\n', ' <br/>'), full=full)
    except Exception as e:
converted = text
return converted
def update_hipache(app_id=None):
from applications.models import Application
if getattr(settings, 'HIPACHE_ENABLED'):
app = Application.objects.get(id=app_id)
redis_host = getattr(settings, 'HIPACHE_REDIS_HOST')
redis_port = getattr(settings, 'HIPACHE_REDIS_PORT')
rds = redis.Redis(host=redis_host, port=redis_port)
with rds.pipeline() as pipe:
domain_key = 'frontend:{0}'.format(app.domain_name)
# remove existing
pipe.delete(domain_key)
pipe.rpush(domain_key, app.id)
# add upstreams
for c in app.containers.all():
port = c.get_ports()[app.backend_port]
upstream = '{0}://{1}:{2}'.format(app.protocol, c.host.hostname,
port)
pipe.rpush(domain_key, upstream)
pipe.execute()
return True
return False
def remove_hipache_config(domain_name=None):
if getattr(settings, 'HIPACHE_ENABLED'):
redis_host = getattr(settings, 'HIPACHE_REDIS_HOST')
redis_port = getattr(settings, 'HIPACHE_REDIS_PORT')
rds = redis.Redis(host=redis_host, port=redis_port)
domain_key = 'frontend:{0}'.format(domain_name)
# remove existing
rds.delete(domain_key)
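# Added note (hedged): both helpers manage a single Redis list per domain in
# the layout hipache expects. After update_hipache(app_id=...) the key
# "frontend:<domain_name>" holds [app.id, "<protocol>://<host>:<port>", ...]
# for every container of the app, and remove_hipache_config(domain_name=...)
# simply deletes that key.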
| [
"[email protected]"
] | |
cab5ec73309abc6f8c1a012ccccd0e4dc50f50b4 | 91f4078045a57eaaafe0b172909d7041e829941c | /arjuna-samples/arjex/test/pkg/gns_adv/check_gnsadv_07_waiters_default_wait.py | 21418e6c64e6736cfe53b4ca0f3d66b1a111ea50 | [
"Apache-2.0"
] | permissive | amiablea2/arjuna | 0d06d1dfb34309f4b6f39b17298f7acb6c3c48c9 | af74e0882216881ceca0a10f26442165ffc43287 | refs/heads/master | 2023-08-21T20:04:30.416303 | 2021-10-27T06:41:40 | 2021-10-27T06:41:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,261 | py | # This file is a part of Arjuna
# Copyright 2015-2021 Rahul Verma
# Website: www.RahulVerma.net
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from arjuna import *
from arjex.lib.gns_adv.app_page_section.app import WordPress
@for_module
def dashboard(request):
# Setup
wordpress = WordPress(section_dir="simple")
home = wordpress.launch()
dashboard = home.login_with_default_creds()
yield dashboard
    # Teardown
dashboard.top_nav.logout()
wordpress.quit()
@test
def check_wait_until_absent_gns_1(request, dashboard):
dashboard.left_nav.gns.wait_until_absent("non_existing")
try:
# It is present
dashboard.left_nav.gns.wait_until_absent("settings")
except GuiWidgetForLabelPresentError as e:
print("Exception as Expected")
print(str(e))
except Exception as e:
        raise Exception("Unexpected exception raised: ", str(e))
else:
raise Exception("Exception not raised.")
@test
def check_wait_until_absent_gns_2(request, dashboard):
dashboard.left_nav.wait_until_absent(id="non_existing")
try:
# It is present
dashboard.left_nav.wait_until_absent(link="Settings")
except GuiWidgetPresentError as e:
print("Exception as Expected")
print(str(e))
except Exception as e:
        raise Exception("Unexpected exception raised: ", str(e))
else:
raise Exception("Exception not raised.")
@test
def check_contains_gns_1(request, dashboard):
print(dashboard.left_nav.gns.contains("settings"))
print(dashboard.left_nav.gns.contains("non_existing"))
@test
def check_contains_gns_2(request, dashboard):
print(dashboard.left_nav.contains(link="Settings"))
print(dashboard.left_nav.contains(id="non_existing"))
| [
"[email protected]"
] | |
047d255cb913eb1fc1bcac608765d94e5a1a7538 | 51f887286aa3bd2c3dbe4c616ad306ce08976441 | /pybind/slxos/v17s_1_02/ipv6/route/link_local_static_route_nh/__init__.py | f409793989d66c90174629d2778ac54c0f5f5de6 | [
"Apache-2.0"
] | permissive | b2220333/pybind | a8c06460fd66a97a78c243bf144488eb88d7732a | 44c467e71b2b425be63867aba6e6fa28b2cfe7fb | refs/heads/master | 2020-03-18T09:09:29.574226 | 2018-04-03T20:09:50 | 2018-04-03T20:09:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,384 | py |
from operator import attrgetter
import pyangbind.lib.xpathhelper as xpathhelper
from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType
from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType
from pyangbind.lib.base import PybindBase
from decimal import Decimal
from bitarray import bitarray
import __builtin__
import route_attributes
class link_local_static_route_nh(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module brocade-common-def - based on the path /ipv6/route/link-local-static-route-nh. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__link_local_static_route_dest','__link_local_nexthop','__link_local_route_oif_type','__link_local_route_oif_name','__route_attributes',)
_yang_name = 'link-local-static-route-nh'
_rest_name = 'link-local-static-route-nh'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
path_helper_ = kwargs.pop("path_helper", None)
if path_helper_ is False:
self._path_helper = False
elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper):
self._path_helper = path_helper_
elif hasattr(self, "_parent"):
path_helper_ = getattr(self._parent, "_path_helper", False)
self._path_helper = path_helper_
else:
self._path_helper = False
extmethods = kwargs.pop("extmethods", None)
if extmethods is False:
self._extmethods = False
elif extmethods is not None and isinstance(extmethods, dict):
self._extmethods = extmethods
elif hasattr(self, "_parent"):
extmethods = getattr(self._parent, "_extmethods", None)
self._extmethods = extmethods
else:
self._extmethods = False
self.__route_attributes = YANGDynClass(base=route_attributes.route_attributes, is_container='container', presence=False, yang_name="route-attributes", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='container', is_config=True)
self.__link_local_static_route_dest = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}), is_leaf=True, yang_name="link-local-static-route-dest", rest_name="link-local-static-route-dest", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN ;; Destination IPv6 Prefix'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='inet:ipv6-prefix', is_config=True)
self.__link_local_route_oif_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..16']}), is_leaf=True, yang_name="link-local-route-oif-name", rest_name="linklocalinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'linklocalinterface'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='string', is_config=True)
self.__link_local_route_oif_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ethernet': {'value': 1}, u'management': {'value': 3}, u'null': {'value': 5}, u've': {'value': 4}},), is_leaf=True, yang_name="link-local-route-oif-type", rest_name="link-local-route-oif-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Outgoing interface type'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='enumeration', is_config=True)
self.__link_local_nexthop = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-nexthop", rest_name="link-local-nexthop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D ;; Next hop IPv6 address'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='inet:ipv6-address', is_config=True)
load = kwargs.pop("load", None)
if args:
if len(args) > 1:
raise TypeError("cannot create a YANG container with >1 argument")
all_attr = True
for e in self._pyangbind_elements:
if not hasattr(args[0], e):
all_attr = False
break
if not all_attr:
raise ValueError("Supplied object did not have the correct attributes")
for e in self._pyangbind_elements:
nobj = getattr(args[0], e)
if nobj._changed() is False:
continue
setmethod = getattr(self, "_set_%s" % e)
if load is None:
setmethod(getattr(args[0], e))
else:
setmethod(getattr(args[0], e), load=load)
def _path(self):
if hasattr(self, "_parent"):
return self._parent._path()+[self._yang_name]
else:
return [u'ipv6', u'route', u'link-local-static-route-nh']
def _rest_path(self):
if hasattr(self, "_parent"):
if self._rest_name:
return self._parent._rest_path()+[self._rest_name]
else:
return self._parent._rest_path()
else:
return [u'ipv6', u'route', u'link-local-static-route-nh']
def _get_link_local_static_route_dest(self):
"""
Getter method for link_local_static_route_dest, mapped from YANG variable /ipv6/route/link_local_static_route_nh/link_local_static_route_dest (inet:ipv6-prefix)
"""
return self.__link_local_static_route_dest
def _set_link_local_static_route_dest(self, v, load=False):
"""
Setter method for link_local_static_route_dest, mapped from YANG variable /ipv6/route/link_local_static_route_nh/link_local_static_route_dest (inet:ipv6-prefix)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_local_static_route_dest is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_local_static_route_dest() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}), is_leaf=True, yang_name="link-local-static-route-dest", rest_name="link-local-static-route-dest", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN ;; Destination IPv6 Prefix'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='inet:ipv6-prefix', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local_static_route_dest must be of a type compatible with inet:ipv6-prefix""",
'defined-type': "inet:ipv6-prefix",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}), is_leaf=True, yang_name="link-local-static-route-dest", rest_name="link-local-static-route-dest", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN ;; Destination IPv6 Prefix'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='inet:ipv6-prefix', is_config=True)""",
})
self.__link_local_static_route_dest = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local_static_route_dest(self):
self.__link_local_static_route_dest = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(/(([0-9])|([0-9]{2})|(1[0-1][0-9])|(12[0-8])))'}), is_leaf=True, yang_name="link-local-static-route-dest", rest_name="link-local-static-route-dest", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D/LEN ;; Destination IPv6 Prefix'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='inet:ipv6-prefix', is_config=True)
def _get_link_local_nexthop(self):
"""
Getter method for link_local_nexthop, mapped from YANG variable /ipv6/route/link_local_static_route_nh/link_local_nexthop (inet:ipv6-address)
"""
return self.__link_local_nexthop
def _set_link_local_nexthop(self, v, load=False):
"""
Setter method for link_local_nexthop, mapped from YANG variable /ipv6/route/link_local_static_route_nh/link_local_nexthop (inet:ipv6-address)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_local_nexthop is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_local_nexthop() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-nexthop", rest_name="link-local-nexthop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D ;; Next hop IPv6 address'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='inet:ipv6-address', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local_nexthop must be of a type compatible with inet:ipv6-address""",
'defined-type': "inet:ipv6-address",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-nexthop", rest_name="link-local-nexthop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D ;; Next hop IPv6 address'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='inet:ipv6-address', is_config=True)""",
})
self.__link_local_nexthop = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local_nexthop(self):
self.__link_local_nexthop = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'pattern': u'((:|[0-9a-fA-F]{0,4}):)([0-9a-fA-F]{0,4}:){0,5}((([0-9a-fA-F]{0,4}:)?(:|[0-9a-fA-F]{0,4}))|(((25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])\\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9]?[0-9])))(%[\\p{N}\\p{L}]+)?'}), is_leaf=True, yang_name="link-local-nexthop", rest_name="link-local-nexthop", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'A:B::C:D ;; Next hop IPv6 address'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='inet:ipv6-address', is_config=True)
def _get_link_local_route_oif_type(self):
"""
Getter method for link_local_route_oif_type, mapped from YANG variable /ipv6/route/link_local_static_route_nh/link_local_route_oif_type (enumeration)
"""
return self.__link_local_route_oif_type
def _set_link_local_route_oif_type(self, v, load=False):
"""
Setter method for link_local_route_oif_type, mapped from YANG variable /ipv6/route/link_local_static_route_nh/link_local_route_oif_type (enumeration)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_local_route_oif_type is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_local_route_oif_type() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ethernet': {'value': 1}, u'management': {'value': 3}, u'null': {'value': 5}, u've': {'value': 4}},), is_leaf=True, yang_name="link-local-route-oif-type", rest_name="link-local-route-oif-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Outgoing interface type'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='enumeration', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local_route_oif_type must be of a type compatible with enumeration""",
'defined-type': "brocade-ipv6-rtm:enumeration",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ethernet': {'value': 1}, u'management': {'value': 3}, u'null': {'value': 5}, u've': {'value': 4}},), is_leaf=True, yang_name="link-local-route-oif-type", rest_name="link-local-route-oif-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Outgoing interface type'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='enumeration', is_config=True)""",
})
self.__link_local_route_oif_type = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local_route_oif_type(self):
self.__link_local_route_oif_type = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_type="dict_key", restriction_arg={u'ethernet': {'value': 1}, u'management': {'value': 3}, u'null': {'value': 5}, u've': {'value': 4}},), is_leaf=True, yang_name="link-local-route-oif-type", rest_name="link-local-route-oif-type", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Outgoing interface type'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='enumeration', is_config=True)
def _get_link_local_route_oif_name(self):
"""
Getter method for link_local_route_oif_name, mapped from YANG variable /ipv6/route/link_local_static_route_nh/link_local_route_oif_name (string)
"""
return self.__link_local_route_oif_name
def _set_link_local_route_oif_name(self, v, load=False):
"""
Setter method for link_local_route_oif_name, mapped from YANG variable /ipv6/route/link_local_static_route_nh/link_local_route_oif_name (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_link_local_route_oif_name is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_link_local_route_oif_name() directly.
"""
parent = getattr(self, "_parent", None)
if parent is not None and load is False:
raise AttributeError("Cannot set keys directly when" +
" within an instantiated list")
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..16']}), is_leaf=True, yang_name="link-local-route-oif-name", rest_name="linklocalinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'linklocalinterface'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='string', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """link_local_route_oif_name must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..16']}), is_leaf=True, yang_name="link-local-route-oif-name", rest_name="linklocalinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'linklocalinterface'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='string', is_config=True)""",
})
self.__link_local_route_oif_name = t
if hasattr(self, '_set'):
self._set()
def _unset_link_local_route_oif_name(self):
self.__link_local_route_oif_name = YANGDynClass(base=RestrictedClassType(base_type=unicode, restriction_dict={'length': [u'1..16']}), is_leaf=True, yang_name="link-local-route-oif-name", rest_name="linklocalinterface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'alt-name': u'linklocalinterface'}}, is_keyval=True, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='string', is_config=True)
def _get_route_attributes(self):
"""
Getter method for route_attributes, mapped from YANG variable /ipv6/route/link_local_static_route_nh/route_attributes (container)
"""
return self.__route_attributes
def _set_route_attributes(self, v, load=False):
"""
Setter method for route_attributes, mapped from YANG variable /ipv6/route/link_local_static_route_nh/route_attributes (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_route_attributes is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_route_attributes() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=route_attributes.route_attributes, is_container='container', presence=False, yang_name="route-attributes", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """route_attributes must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=route_attributes.route_attributes, is_container='container', presence=False, yang_name="route-attributes", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='container', is_config=True)""",
})
self.__route_attributes = t
if hasattr(self, '_set'):
self._set()
def _unset_route_attributes(self):
self.__route_attributes = YANGDynClass(base=route_attributes.route_attributes, is_container='container', presence=False, yang_name="route-attributes", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-compact-syntax': None, u'cli-drop-node-name': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-rtm', defining_module='brocade-ipv6-rtm', yang_type='container', is_config=True)
link_local_static_route_dest = __builtin__.property(_get_link_local_static_route_dest, _set_link_local_static_route_dest)
link_local_nexthop = __builtin__.property(_get_link_local_nexthop, _set_link_local_nexthop)
link_local_route_oif_type = __builtin__.property(_get_link_local_route_oif_type, _set_link_local_route_oif_type)
link_local_route_oif_name = __builtin__.property(_get_link_local_route_oif_name, _set_link_local_route_oif_name)
route_attributes = __builtin__.property(_get_route_attributes, _set_route_attributes)
_pyangbind_elements = {'link_local_static_route_dest': link_local_static_route_dest, 'link_local_nexthop': link_local_nexthop, 'link_local_route_oif_type': link_local_route_oif_type, 'link_local_route_oif_name': link_local_route_oif_name, 'route_attributes': route_attributes, }
| [
"[email protected]"
] | |
cc827a59a8376d853d13dc82c896378c171fca75 | d2deead5da9b4999eb8cee6235b43c897f468212 | /Week2/helloworld/helloworld/urls.py | 66b21bef9a31d85d169b6a5d7aa68503314edfbf | [] | no_license | mkeshilbaev/BFDjango | 39f829d6362f4acbd502f98198facf482f1fd392 | caa34eb8e456e20cacadaa5b4e20949fa32ff529 | refs/heads/master | 2023-05-02T22:39:25.023549 | 2020-04-25T14:16:22 | 2020-04-25T14:16:22 | 233,826,635 | 0 | 0 | null | 2023-04-21T20:48:01 | 2020-01-14T11:30:49 | Python | UTF-8 | Python | false | false | 802 | py | """helloworld URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('main/', include('main.urls')),
]
| [
"[email protected]"
] | |
822d8811b069aed67ce8cd1f23bbf59c6c4a9c45 | 0b25dc3f9b4ef736e739aadddec33b96dd65a0c8 | /huawei/21.py | 7302d8adf92d83dba6e7130102b2a602b4176164 | [] | no_license | ttp55/LearnPy | b123f44a74e4364771491c572705742c15eb33ff | 1530e158bde152e5c585f496dd1e5ffcffdb73bc | refs/heads/master | 2023-05-11T13:02:30.157285 | 2023-05-08T07:13:57 | 2023-05-08T07:13:57 | 196,953,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 253 | py | # @Time : 2022/2/8 13:58
# @Author : WZG
# --coding:utf-8--
l = [1,2,3,4,5,5]
print(l[3:]+l[:3])
s = input()
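# Added note: the loop below repeatedly removes innermost adjacent bracket
# pairs, so a fully matched string reduces to "" and `not s` prints True,
# e.g. "{[()]}" -> "{[]}" -> "{}" -> "" ; "([)]" never shrinks and prints False.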
while '{}' in s or '()' in s or '[]' in s:
s = s.replace('{}', '')
s = s.replace('[]', '')
s = s.replace('()', '')
print(not s)
| [
"[email protected]"
] | |
1659e77a9f834c52289989c0d18398077d405e47 | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/308/usersdata/290/72816/submittedfiles/ex1.py | 9d833bfe06bccfb760241d32e0c22608bc1e9950 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | # -*- coding: utf-8 -*-
from __future__ import division
a = input('Digite a: ')
b = input('Digite b: ')
c = input('Digite c: ')
# START FROM HERE!
D=((b**2)-(4*a*c))
print(a) | [
"[email protected]"
] | |
a46f5b9eedcb1195e2fc993c39f96762d17c98b0 | bad67fafaa475e8dc7035e8a21e80e77cd90f43a | /manage.py | 3282f2314889e472c8fea59e71c5d8aa2c01f038 | [] | no_license | Aitmatow/Djago_girlst_test | 66c4a59f00b2ef95bee905c99d9d15d85bee1878 | a436e1b561dc8d0e8d7e4a88053ead74301c8aaa | refs/heads/master | 2022-09-19T12:08:37.493160 | 2020-06-03T05:39:40 | 2020-06-03T05:39:40 | 268,985,447 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 627 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Pycharm.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
a1add44754eec4db72df64bfc3f294a01ec01a29 | e78433e847c5a5ff1ed583303c0240b0a93dc06a | /0x01-python-if_else_loops_functions/6-print_comb3.py | 2682ef8c4fae7eef14ca880d7b9148e40c1481e8 | [] | no_license | david-develop/holbertonschool-higher_level_programming | 75eaef38173361cc4ef9202f3e3f6c5a781239ed | 90f62136a7ddfb7d3921daccdc38d50a5376343b | refs/heads/master | 2020-07-22T23:27:35.494155 | 2020-03-26T17:34:36 | 2020-03-26T17:34:36 | 207,366,767 | 0 | 4 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | #!/usr/bin/python3
for num1 in range(0, 8):
for num2 in range(num1 + 1, 10):
print("{}{}".format(num1, num2), end=', ')
print("{}{}".format(num1 + 1, num2))
| [
"[email protected]"
] | |
04ba26d95374cc9f8beb1e7b9eb48e8d9e9fd7cc | ce2a6330c807591f95210bdda005f875a76b98a8 | /model_lab/ml_models/titanic/scripts/train.py | 22de07daecff46eabb25b6cef569ac1b056d0c75 | [] | no_license | MartinThoma/model-lab | c1ca8d0950086cd4e332862d6a3b7a7ae7f10a2f | 3525cdd5271a1b7940f801d4245343b1592ffa67 | refs/heads/master | 2020-04-11T18:43:54.537892 | 2018-12-31T20:14:40 | 2018-12-31T20:17:02 | 162,009,569 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,893 | py | """Train a model for Titanic."""
import re
import math
import pickle
# 3rd party modules
import numpy as np
import pandas as pd
from sklearn.ensemble import ExtraTreesRegressor
def main():
"""Load, train, serialize, test."""
data = load_data()
analyze_features(data['full_features'])
model = train(data)
with open('model.pickle', 'wb') as f:
pickle.dump(model, f)
evaluate(model, data)
def load_data():
"""Load the titanic dataset."""
train = pd.read_csv("../input/train.csv", dtype={"Age": np.float64}, )
test = pd.read_csv("../input/test.csv", dtype={"Age": np.float64}, )
train = train.set_index('PassengerId')
test = test.set_index('PassengerId')
train = train.apply(preprocess, axis=1)
test = test.apply(preprocess, axis=1)
x_train = train.drop(['Survived'], axis=1)
y_train = train['Survived']
x_test = test
return {'train': {'x': x_train, 'y': y_train},
'test': {'x': x_test},
'full_features': pd.concat([x_train, x_test])}
def preprocess(item):
"""Preprocess the dictionary 'item'."""
item = feature_engineering(item)
item = encode_features(item)
return item
def feature_engineering(item):
"""
Develop new features.
Parameters
----------
item : Dict[str, Any]
Returns
-------
item : Dict[str, Any]
"""
if item["Cabin"] is None:
item["Cabin"] = " "
if item["Age"] is None or math.isnan(item["Age"]):
item["Age"] = 18 # ????
if item["Fare"] is None or math.isnan(item["Fare"]):
item["Fare"] = -1 # ????
def get_title(x):
        return re.search(r' ([A-Za-z]+)\.', x).group(1)
item["Title"] = get_title(item["Name"])
return item
def encode_features(item):
"""
Encode features for machine learning models.
This step has no value for humans, in contrast to the feature_engineering
step.
"""
item['is_male'] = int(item['Sex'] == 'male')
del item['Name']
del item['Sex']
# del item['Fare']
del item['Cabin']
del item['Ticket']
# One-hot encoding: Embarked
item['embarked_s'] = int(item['Embarked'] == 'S')
item['embarked_c'] = int(item['Embarked'] == 'C')
item['embarked_q'] = int(item['Embarked'] == 'Q')
del item['Embarked']
# One-hot encoding: Title
item['title_mr'] = int(item['Title'] == 'Mr')
item['title_miss'] = int(item['Title'] == 'Miss')
item['title_mrs'] = int(item['Title'] == 'Mrs')
item['title_master'] = int(item['Title'] == 'Master')
item['title_other'] = 1 - (item['title_mr'] +
item['title_miss'] +
item['title_mrs'] +
item['title_master'])
del item['Title']
return item
def analyze_features(df_features):
for column in df_features.columns:
print('## ' + column)
value_counts = df_features[column].value_counts()
if len(value_counts) > 10:
print('Many values')
else:
print(value_counts)
count_nan = len(df_features[column]) - df_features[column].count()
if count_nan > 0:
print('has nan')
print('')
def train(data):
etr = ExtraTreesRegressor(n_estimators=10)
etr.fit(data['train']['x'], np.ravel(data['train']['y']))
return etr
def evaluate(model, data):
score = model.score(data['train']['x'], data['train']['y'])
    print("Accuracy: {:0.3f}".format(score * 100))
predictions = model.predict(data['test']['x'])
passenger_id = np.array(data['test']['x'].index).astype(int)
my_prediction = pd.DataFrame(predictions,
passenger_id,
columns=["Survived"])
my_prediction.to_csv("my_prediction.csv", index_label=["PassengerId"])
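# Hedged example (not part of the original pipeline): how the model serialized in
# main() could be reloaded later for inference. 'model.pickle' matches the file
# written above; everything else here is illustrative.
def load_model(path='model.pickle'):
    """Reload a previously pickled model (illustrative helper)."""
    with open(path, 'rb') as f:
        return pickle.load(f)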
if __name__ == '__main__':
main()
| [
"[email protected]"
] | |
220acf2053b36953db69197b5b7cd45c5040bc57 | 48b9d828acf80792bc4385febaa734a2e96ad465 | /test-openmps/Examples/05_Fermions_LongRange.py | fd886085ce8c20f16a78e8dadbd894ef399ac75e | [
"MIT"
] | permissive | OminiaVincit/qphase-trans | dd4ab2e0cacc449ead3bef318a65eb05aed45621 | 40e0c078dcd74282e8d8f44690433bf670bff8cb | refs/heads/master | 2023-05-06T12:14:30.368375 | 2021-05-28T05:11:58 | 2021-05-28T05:11:58 | 235,478,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,316 | py | import MPSPyLib as mps
import numpy as np
import sys
import os.path
def main(PostProcess=False):
"""
Introductory example for openMPS to simulate a fermionic system with
long-range interactions. Two modes are available when running the
example from command line:
* ``python LongRangeTunneling.py --PostProcess=F`` : runs the MPSFortLib to
determine the ground state statics (initial state).
(default if ``--PostProcess`` not present.)
* ``python LongRangeTunneling.py --PostProcess=T`` : printing the results
of the simulation run before.
"""
# Build operators
Operators = mps.BuildFermiOperators()
# Define Hamiltonian MPO
H = mps.MPO(Operators)
H.AddMPOTerm('FiniteFunction', ['fdagger','f'], f=[1.0, -0.2],
hparam='t', weight=-1.0, Phase=True)
# Observables
myObservables = mps.Observables(Operators)
# Site terms
myObservables.AddObservable('site', 'nftotal', 'n')
# Correlation functions
myObservables.AddObservable('corr', ['fdagger', 'f'], 'spdm', Phase=True)
# Convergence parameters
myConv = mps.MPSConvParam(max_bond_dimension=30, max_num_sweeps=2)
myConv.AddModifiedConvergenceParameters(0, ['max_bond_dimension',
'local_tol'], [50, 1E-14])
# Specify constants and parameter list
t = 1.0
L = 10
N = 5
parameters = [{
'simtype' : 'Finite',
# Directories
'job_ID' : 'LongRangeTunneling_',
'unique_ID' : 'L_' + str(L) + 'N' + str(N),
'Write_Directory' : 'TMP_05/',
'Output_Directory' : 'OUTPUTS_05/',
# System size and Hamiltonian parameters
'L' : L,
't' : t,
# Specification of symmetries and good quantum numbers
'Abelian_generators' : ['nftotal'],
'Abelian_quantum_numbers' : [N],
'MPSObservables' : myObservables,
'MPSConvergenceParameters' : myConv,
'logfile' : True
}]
# Write Fortran-readable main files
MainFiles = mps.WriteFiles(parameters, Operators, H,
PostProcess=PostProcess)
# Run the simulations and quit if not just post processing
if(not PostProcess):
if os.path.isfile('./Execute_MPSMain'):
RunDir = './'
else:
RunDir = None
mps.runMPS(MainFiles, RunDir=RunDir)
return
# Postprocessing
# --------------
Outputs = mps.ReadStaticObservables(parameters)
# Get observables of state computed with most stringent convergence criteria
fullyconvergedOutputs = mps.GetObservables(Outputs,
'convergence_parameter', 2)
spdm = fullyconvergedOutputs[0]['spdm']
spdmeigs, U = np.linalg.eigh(spdm)
    print(r'Eigenvalues of <f^{\dagger}_i f_j>', spdmeigs)
return
if(__name__ == '__main__'):
# Check for command line arguments
Post = False
for arg in sys.argv[1:]:
key, val = arg.split('=')
if(key == '--PostProcess'): Post = (val == 'T') or (val == 'True')
# Run main function
main(PostProcess=Post)
| [
"[email protected]"
] | |
64c1f70e329c85405d3ab1ac2f692dfbe8516499 | ded10c2f2f5f91c44ec950237a59225e8486abd8 | /.history/2/matrix_squaring_20200423020640.py | 59367a0a4ab8703f9cda9c70a66fc9fe3cec0b6e | [] | no_license | jearistiz/Statistical-Physics-Projects | 276a86407b32ded4e06b32efb2fadbd8eff8daed | d9c5b16a50856e148dc8604d92b6de3ea21fc552 | refs/heads/master | 2022-11-05T03:41:23.623050 | 2020-06-28T06:36:05 | 2020-06-28T06:36:05 | 254,909,897 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 35,579 | py | # -*- coding: utf-8 -*-
from __future__ import division
import os
import numpy as np
import matplotlib.pyplot as plt
from time import time
import pandas as pd
# Author: Juan Esteban Aristizabal-Zuluaga
# date: 20200414
def rho_free(x,xp,beta):
    """Usage: returns the density matrix element of a free particle on an
    infinite torus.
    """
return (2.*np.pi*beta)**(-0.5) * np.exp(-(x-xp)**2 / (2 * beta))
def harmonic_potential(x):
    """Usage: returns the harmonic potential at a given position x."""
return 0.5*x**2
def anharmonic_potential(x):
    """Returns the anharmonic potential at a given position x."""
    # return np.abs(x)*(1+np.cos(x))  # this potential gives an interesting result
return 0.5*x**2 - x**3 + x**4
def QHO_canonical_ensemble(x,beta):
"""
    Usage: computes the theoretical quantum probability of finding the harmonic
           oscillator (immersed in a thermal bath at inverse temperature beta) at
           position x.
    Receives:
        x: float -> position.
        beta: float -> inverse temperature in reduced units, beta = 1/T.
    Returns:
        theoretical quantum probability density at position x for inverse temperature beta.
"""
return (np.tanh(beta/2.)/np.pi)**0.5 * np.exp(- x**2 * np.tanh(beta/2.))
def Z_QHO(beta):
    """Usage: returns the partition function of the 1D quantum harmonic oscillator."""
return 0.5/np.sinh(beta/2)
def E_QHO_avg_theo(beta):
    """Usage: returns the internal energy of the 1D quantum harmonic oscillator."""
return 0.5/np.tanh(0.5*beta)
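# Note (added sketch): in reduced units (hbar = omega = k_B = 1) the two closed
# forms above follow from the geometric series
#   Z(beta) = sum_n exp(-beta*(n + 1/2)) = 1/(2*sinh(beta/2)),
# and from <E> = -d(ln Z)/d(beta) = (1/2)/tanh(beta/2).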
def rho_trotter(x_max=5., nx=101, beta=1, potential=harmonic_potential):
"""
    Usage: returns the density matrix in the Trotter (high-temperature)
           approximation under the influence of the potential "potential".
    Receives:
        x_max: float -> the x values lie in the interval (-x_max, x_max).
        nx: int -> number of (equally spaced) x values considered.
        beta: float -> inverse temperature in reduced units.
        potential: func -> interaction potential. Must be a function of x only.
    Returns:
        rho: numpy array, shape=(nx,nx) -> density matrix in the high-temperature
                                           Trotter approximation for the given potential.
        grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
        dx: float -> spacing between consecutive grid_x values.
"""
nx = int(nx)
    # If nx is even, change it to the nearest odd number so that 0 is included in the x values
    if nx%2 == 0:
        nx = nx + 1
    # Position grid spacing given the input x_max and nx
    dx = 2 * x_max/(nx-1)
    # List of x values given the discretization and x_max
    grid_x = [i*dx for i in range(-int((nx-1)/2),int((nx-1)/2 + 1))]
    # Build the density matrix in the Trotter approximation
rho = np.array([[rho_free(x , xp, beta) * np.exp(-0.5*beta*(potential(x)+potential(xp)))
for x in grid_x]
for xp in grid_x])
return rho, grid_x, dx
def density_matrix_squaring(rho, grid_x, N_iter=1, beta_ini=1, print_steps=True):
"""
    Usage: returns the density matrix after applying the matrix-squaring algorithm
           N_iter times. The first iteration uses the input density matrix rho
           (at inverse temperature beta_ini); each subsequent iteration uses the
           matrix produced by the previous one. The system associated with the
           resulting density matrix is at inverse temperature
           beta_fin = beta_ini * 2**(N_iter).
    Receives:
        rho: numpy array, shape=(nx,nx) -> density matrix discretized on the values
                                           given by grid_x.
        grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
        N_iter: int -> number of iterations of the algorithm.
        beta_ini: float -> inverse temperature associated with the input density
                           matrix rho.
        print_steps: bool -> whether to print the beta value at each iteration.
    Returns:
        rho: numpy array, shape=(nx,nx) -> density matrix at inverse temperature
                                           beta_fin.
        trace_rho: float -> trace of the density matrix at inverse temperature
                            beta_fin. With the definition of rho used here, this
                            equals the partition function at that temperature.
        beta_fin: float -> inverse temperature of the system associated with rho.
"""
    # Position grid spacing
    dx = grid_x[1] - grid_x[0]
    # Compute beta_fin from the input beta_ini and N_iter
    beta_fin = beta_ini * 2 ** N_iter
    # Iterate the matrix squaring algorithm
if print_steps:
print('\nbeta_ini = %.3f'%beta_ini,
'\n----------------------------------------------------------------')
for i in range(N_iter):
rho = dx * np.dot(rho,rho)
        # Print relevant information
        if print_steps:
            print(u'Iteration %d) 2^%d * beta_ini --> 2^%d * beta_ini'%(i, i, i+1))
if print_steps:
print('----------------------------------------------------------------\n' +
u'beta_fin = %.3f'%beta_fin)
    # Compute the trace of rho
trace_rho = np.trace(rho)*dx
return rho, trace_rho, beta_fin
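# Usage sketch (illustrative parameters, not part of the original script): build a
# high-temperature Trotter matrix and square it down to beta_fin = beta_ini * 2**N_iter.
#
#   rho, grid_x, dx = rho_trotter(x_max=5., nx=201, beta=4 * 2**-7)
#   rho, Z, beta_fin = density_matrix_squaring(rho, grid_x, N_iter=7,
#                                              beta_ini=4 * 2**-7)
#   pi_x = np.diag(rho) / Z   # normalized probability density pi(x; beta_fin)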
def save_csv(data, data_headers=None, data_index=None, file_name=None,
relevant_info=None, print_data=True):
"""
    Usage: data must contain the lists that will become the columns of a CSV file
           saved under file_name. relevant_info adds comments to the first lines
           of the file.
    Receives:
        data: array of arrays, shape=(nx,ny) -> each column becomes a column of the file.
        data_headers: numpy array, shape=(ny,) -> column names.
        data_index: numpy array, shape=(nx,) -> row names.
        file_name: str -> name of the file in which the data is saved.
        relevant_info: list of str -> information added as comments in the first
                                      lines; each element of the list becomes a
                                      new line.
        print_data: bool -> whether to print the saved data to screen.
    Returns:
        data_pdDF: pd.DataFrame -> the data as a pandas DataFrame.
        Saves the file with the data and the relevant information in its first lines.
"""
    if file_name is None:
        # full path of this script
        script_dir = os.path.dirname(os.path.abspath(__file__))
        file_name = script_dir + '/' + 'file_name.csv'
    data_pdDF = pd.DataFrame(data, columns=data_headers, index=data_index)
    # Create the CSV file and add the relevant comments given as input
    if relevant_info is not None:
        # Add the relevant information in the first lines
with open(file_name,mode='w') as file_csv:
for info in list(relevant_info):
file_csv.write('# '+info+'\n')
file_csv.close()
        # Use pandas to write the data to the file in CSV format.
with open(file_name,mode='a') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
else:
with open(file_name,mode='w') as file_csv:
data_pdDF.to_csv(file_csv)
file_csv.close()
    # Print data to screen.
    if print_data:
print(data_pdDF)
return data_pdDF
def run_pi_x_sq_trotter(x_max=5., nx=201, N_iter=7, beta_fin=4, potential=harmonic_potential,
potential_string='harmonic_potential', print_steps=True,
save_data=True, file_name=None, relevant_info=None,
plot=True, save_plot=True, show_plot=True):
"""
    Usage: runs the matrix-squaring algorithm iteratively (N_iter times). The first
           iteration uses a Trotter-approximation density matrix at inverse
           temperature beta_ini = beta_fin * 2**(-N_iter) for the potential given
           by "potential"; each subsequent iteration uses the matrix produced by
           the previous one. The function also saves pi(x;beta) vs. x to a text
           file and plots pi(x;beta) against the theory for the quantum harmonic
           oscillator.
    Receives:
        x_max: float -> the x values lie in the interval (-x_max, x_max).
        nx: int -> number of x values considered.
        N_iter: int -> number of matrix-squaring iterations.
        beta_fin: float -> inverse temperature we want to reach after iterating
                           the matrix-squaring algorithm.
        potential: func -> interaction potential used in the Trotter approximation.
                           Must be a function of x.
        potential_string: str -> name of the potential (used to name the generated
                                 files).
        print_steps: bool -> whether to print the matrix-squaring steps.
        save_data: bool -> whether to save the data to a .csv file.
        file_name: str -> name of the CSV file in which the data is saved. If None,
                          a convenient name is built from the relevant parameters.
        plot: bool -> whether to plot.
        save_plot: bool -> whether to save the figure.
        show_plot: bool -> whether to show the figure on screen.
    Returns:
        rho: numpy array, shape=(nx,nx) -> density matrix at inverse temperature
                                           beta_fin.
        trace_rho: float -> trace of the density matrix at inverse temperature
                            beta_fin. With the definition of rho used here, this
                            equals the partition function at that temperature.
        grid_x: numpy array, shape=(nx,) -> x values at which rho is evaluated.
"""
    # Compute beta_ini from the input beta_fin and N_iter
    beta_ini = beta_fin * 2**(-N_iter)
    # Compute rho in the Trotter approximation
    rho, grid_x, dx = rho_trotter(x_max, nx, beta_ini, potential)
    grid_x = np.array(grid_x)
    # Approximate rho by iterating matrix squaring N_iter times.
rho, trace_rho, beta_fin_2 = density_matrix_squaring(rho, grid_x, N_iter,
beta_ini, print_steps)
print('---------------------------------------------------------'
+ '---------------------------------------------------------\n'
+ u'Matrix squaring: beta_ini = %.3f --> beta_fin = %.3f'%(beta_ini, beta_fin_2)
+ u' N_iter = %d Z(beta_fin) = Tr(rho(beta_fin)) = %.3E \n'%(N_iter,trace_rho)
+ '---------------------------------------------------------'
+ '---------------------------------------------------------'
)
    # Normalize rho to 1 and compute the probability density on grid_x.
    rho_normalized = np.copy(rho)/trace_rho
    x_weights = np.diag(rho_normalized)
    # Save data to a CSV file.
    script_dir = os.path.dirname(os.path.abspath(__file__))  # full path of this script
    if save_data:
        # Name of the .csv file in which the pi(x;beta_fin) values are saved.
if file_name is None:
csv_file_name = (script_dir
+ u'/pi_x-ms-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.csv'
%(potential_string,beta_fin,x_max,nx,N_iter))
else:
csv_file_name = script_dir + '/' + file_name + '.csv'
        # Relevant information to add as comments to the csv file.
if relevant_info is None:
relevant_info = ['pi(x;beta_fin) computed using matrix squaring algorithm and'
+ ' Trotter approximation. Parameters:',
u'%s x_max = %.3f nx = %d '%(potential_string,x_max,nx)
+ u'N_iter = %d beta_ini = %.3f '%(N_iter,beta_ini,)
+ u'beta_fin = %.3f'%beta_fin]
        # Save the pi(x;beta_fin) values to the csv file.
pi_x_data = np.array([grid_x.copy(),x_weights.copy()])
pi_x_data_headers = ['position_x','prob_density']
pi_x_data = save_csv(pi_x_data.transpose(),pi_x_data_headers,None,csv_file_name,
relevant_info,print_data=0)
    # Plot and compare against theory
if plot:
plt.figure(figsize=(8,5))
        plt.plot(grid_x, x_weights,
                 label='Matrix squaring +\nTrotter formula.\n$N=%d$ iterations\n$dx=%.3E$'
                       % (N_iter, dx))
        plt.plot(grid_x, QHO_canonical_ensemble(grid_x, beta_fin), label=u'QHO theoretical value')
plt.xlabel(u'x')
plt.ylabel(u'$\pi^{(Q)}(x;\\beta)$')
plt.legend(loc='best',title=u'$\\beta=%.2f$'%beta_fin)
plt.tight_layout()
if save_plot:
if file_name is None:
plot_file_name = (script_dir
+ u'/pi_x-ms-plot-%s-beta_fin_%.3f-x_max_%.3f-nx_%d-N_iter_%d.eps'
%(potential_string,beta_fin,x_max,nx,N_iter))
else:
plot_file_name = script_dir+u'/pi_x-ms-plot-'+file_name+'.eps'
plt.savefig(plot_file_name)
if show_plot:
plt.show()
plt.close()
return rho, trace_rho, grid_x
def Z_several_values(temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
Z_file_name = None, relevant_info_Z = None, print_Z_data = True,
x_max=7., nx=201, N_iter=7, potential = harmonic_potential,
potential_string = 'harmonic_potential', print_steps=False,
save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None,
plot=False, save_plot=False, show_plot=False):
"""
    Usage: computes several values of the partition function, Z, using the density
           operator approximated by the matrix-squaring algorithm.
    Receives:
        temp_min: float -> Z is computed for beta values in (1/temp_max, 1/temp_min),
                           with N_temp equally spaced values.
        temp_max: float.
        N_temp: int.
        save_Z_csv: bool -> whether to save the computed values to a CSV file.
        Z_file_name: str -> name of the file in which the Z data is saved. If None,
                            a convenient name is built from the relevant parameters.
        relevant_info_Z: list -> relevant information added to the first lines of the
                                 file; each str in the list becomes a new line.
        print_Z_data: bool -> whether to print the Z data to screen.
        *args: tuple -> arguments of run_pi_x_sq_trotter.
    Returns:
        Z_data: list, shape=(3,)
            Z_data[0]: list, shape=(N_temp,) -> beta values at which Z is evaluated.
            Z_data[1]: list, shape=(N_temp,) -> T values at which Z is evaluated.
            Z_data[2]: list, shape=(N_temp,) -> Z values.
            i.e. Z(beta) = Z(1/T): Z_data[2] as a function of Z_data[0] (or of
            Z_data[1]).
"""
    # Convert the temperature bounds to beta values and build the list of betas.
beta_max = 1./temp_min
beta_min = 1./temp_max
N_temp = int(N_temp)
beta_array = np.linspace(beta_max,beta_min,N_temp)
Z = []
    # Compute Z for the beta values specified in beta_array.
for beta_fin in beta_array:
        rho, trace_rho, grid_x = run_pi_x_sq_trotter(x_max, nx, N_iter, beta_fin, potential,
                                                     potential_string, print_steps,
                                                     save_pi_x_data, pi_x_file_name,
                                                     relevant_info_pi_x, plot, save_plot,
                                                     show_plot)
Z.append(trace_rho)
    # Build the function's output.
Z_data = np.array([beta_array.copy(), 1./beta_array.copy(), Z.copy()], dtype=float)
    # Save the Z data to a CSV file.
if save_Z_csv == True:
script_dir = os.path.dirname(os.path.abspath(__file__))
if Z_file_name is None:
Z_file_name = ('Z-ms-%s-beta_max_%.3f-'%(potential_string,1./temp_min)
+ 'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max)
+ 'nx_%d-N_iter_%d.csv'%(nx, N_iter))
Z_file_name = script_dir + '/' + Z_file_name
if relevant_info_Z is None:
relevant_info_Z = ['Partition function at several temperatures',
'%s beta_max = %.3f '%(potential_string,1./temp_min)
+ 'beta_min = %.3f N_temp = %d '%(1./temp_max,N_temp)
+ 'x_max = %.3f nx = %d N_iter = %d'%(x_max,nx, N_iter)]
Z_data_headers = ['beta', 'temperature', 'Z']
Z_data = save_csv(Z_data.transpose(), Z_data_headers, None, Z_file_name, relevant_info_Z,
print_data=False)
if print_Z_data == True:
print(Z_data)
return Z_data
def average_energy(read_Z_data=True, generate_Z_data=False, Z_file_name = None,
plot_energy=True, save_plot_E=True, show_plot_E=True,
E_plot_name=None,
temp_min=1./10, temp_max=1/2., N_temp=10, save_Z_csv=True,
relevant_info_Z=None, print_Z_data=True,
x_max=7., nx=201, N_iter=7, potential=harmonic_potential,
potential_string='harmonic_potential', print_steps=False,
save_pi_x_data=False, pi_x_file_name=None, relevant_info_pi_x=None,
plot_pi_x=False, save_plot_pi_x=False, show_plot_pi_x=False):
"""
    Usage: computes the average energy, E, of the system defined by "potential".
           The partition function data can either be read from file or generated,
           since E = -(d/d beta) log(Z).
    Receives:
        read_Z_data: bool -> whether the Z data is read from a file named Z_file_name.
        generate_Z_data: bool -> whether the Z data is generated.
        Note: read_Z_data and generate_Z_data are mutually exclusive; the first
              option is checked first.
        Z_file_name: str -> name of the file from which the Z data is read or to
                            which it is saved. If None, a convenient name is built
                            from the relevant parameters.
        plot_energy: bool -> whether to plot the energy.
        save_plot_E: bool -> whether to save the energy plot. Note that if
                             plot_energy=False no plot is generated.
        show_plot_E: bool -> whether to show the energy plot on screen.
        E_plot_name: str -> file name under which the energy plot is saved.
        *args: tuple -> arguments of Z_several_values.
    Returns:
        E_avg: list -> average-energy values for the beta values given by beta_read.
        beta_read: list
"""
    # Decide whether to read or generate the Z data.
if read_Z_data:
Z_file_read = pd.read_csv(Z_file_name, index_col=0, comment='#')
elif generate_Z_data:
t_0 = time()
Z_data = Z_several_values(temp_min, temp_max, N_temp, save_Z_csv, Z_file_name,
relevant_info_Z, print_Z_data, x_max, nx, N_iter, potential,
potential_string, print_steps, save_pi_x_data, pi_x_file_name,
relevant_info_pi_x, plot_pi_x,save_plot_pi_x, show_plot_pi_x)
t_1 = time()
print('--------------------------------------------------------------------------\n'
+ '%d values of Z(beta) generated --> %.3f sec.'%(N_temp,t_1-t_0))
Z_file_read = Z_data
else:
        print('Choose whether the partition function data, Z, is generated or read.\n'
              + 'These options are mutually exclusive; if both are selected, the '
              + 'algorithm reads the data.')
beta_read = Z_file_read['beta']
temp_read = Z_file_read['temperature']
Z_read = Z_file_read['Z']
    # Compute the average energy.
    E_avg = np.gradient(-np.log(Z_read),beta_read)
    # Plot.
if plot_energy:
plt.figure(figsize=(8,5))
        plt.plot(temp_read,E_avg,label=u'$\langle E \\rangle$ via matrix squaring')
        plt.plot(temp_read,E_QHO_avg_theo(beta_read),label=u'theoretical $\langle E \\rangle$')
plt.legend(loc='best')
plt.xlabel(u'$T$')
plt.ylabel(u'$\langle E \\rangle$')
if save_plot_E:
script_dir = os.path.dirname(os.path.abspath(__file__))
if E_plot_name is None:
E_plot_name = ('E-ms-plot-%s-beta_max_%.3f-'%(potential_string,1./temp_min)
+ 'beta_min_%.3f-N_temp_%d-x_max_%.3f-'%(1./temp_max,N_temp,x_max)
+ 'nx_%d-N_iter_%d.eps'%(nx, N_iter))
E_plot_name = script_dir + '/' + E_plot_name
plt.savefig(E_plot_name)
if show_plot_E:
plt.show()
plt.close()
return E_avg, beta_read.to_numpy()
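# Note (added): the derivative -d(ln Z)/d(beta) above is taken numerically with
# np.gradient over the sampled beta values, so its accuracy depends on N_temp.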
def calc_error(x,xp,dx):
"""
    Usage: computes the accumulated error of the numerically computed pi(x;beta)
           relative to the theoretical value.
"""
x, xp = np.array(x), np.array(xp)
N = len(x)
if N != len(xp):
        raise Exception('x and xp must have the same size.')
else:
return np.sum(np.abs(x-xp))*dx
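# Note (added): calc_error is a discretized L1 distance, i.e.
# error ~ integral of |pi_numeric(x) - pi_theory(x)| dx over the given grid.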
def optimization(generate_opt_data=True, read_opt_data=False, beta_fin=4, x_max=5,
potential=harmonic_potential, potential_string='harmonic_potential',
nx_min=50, nx_max=1000, nx_sampling=50, N_iter_min=1, N_iter_max=20,
save_opt_data=False, opt_data_file_name=None, plot=True,
show_plot=True, save_plot=True, opt_plot_file_name=None):
"""
    Usage: computes error values using calc_error() in order to find the dx and
           beta_ini values that are optimal for running the algorithm
           (optimal = minimizing the error).
    Receives:
        generate_opt_data: bool -> whether the optimization data is generated.
        read_opt_data: bool -> whether the optimization data is read.
        Note: generate_opt_data and read_opt_data are mutually exclusive; the first
              one is checked first.
        nx_min: int
        nx_max: int -> related to dx through dx = 2*x_max/(nx-1).
        nx_sampling: int -> nx values are generated via range(nx_max, nx_min, -1*nx_sampling).
        N_iter_min: int
        N_iter_max: int -> related to beta_ini through beta_ini = beta_fin * 2**(-N_iter).
                           N_iter values are generated via range(N_iter_max, N_iter_min-1, -1).
        save_opt_data: bool -> whether the optimization data is saved to a CSV file.
        opt_data_file_name: str -> file name for the optimization data.
        plot: bool -> whether to plot the optimization results.
        show_plot: bool -> whether to show the plot.
        save_plot: bool -> whether to save the plot.
        opt_plot_file_name: str -> file name of the optimization plot. If None, a
                                   convenient name is built from the relevant parameters.
    Returns:
        error: list, shape=(nb,ndx) -> calc_error values for different dx and beta_ini;
                                       dx increases from left to right and beta_ini
                                       from top to bottom.
        dx_grid: list, shape=(ndx,) -> dx values for which the error is computed.
        beta_ini_grid: list, shape=(nb,) -> beta_ini values for which the error is computed.
"""
    # Decide whether to generate or read the data.
if generate_opt_data:
N_iter_min = int(N_iter_min)
N_iter_max = int(N_iter_max)
nx_min = int(nx_min)
nx_max = int(nx_max)
if nx_min%2==1:
nx_min -= 1
if nx_max%2==0:
nx_max += 1
        # Build the nx and N_iter values (equivalent to generating dx and beta_ini values)
nx_values = range(nx_max,nx_min,-1*nx_sampling)
N_iter_values = range(N_iter_max,N_iter_min-1,-1)
dx_grid = [2*x_max/(nx-1) for nx in nx_values]
beta_ini_grid = [beta_fin * 2**(-N_iter) for N_iter in N_iter_values]
error = []
        # Compute the error for each specified value of nx and N_iter
        # (equivalently, of dx and beta_ini).
for N_iter in N_iter_values:
row = []
for nx in nx_values:
rho,trace_rho,grid_x = run_pi_x_sq_trotter(x_max, nx, N_iter, beta_fin,
potential, potential_string,
False, False, None, None, False,
False, False)
grid_x = np.array(grid_x)
dx = grid_x[1]-grid_x[0]
rho_normalized = np.copy(rho)/trace_rho
pi_x = np.diag(rho_normalized)
theoretical_pi_x = QHO_canonical_ensemble(grid_x,beta_fin)
error_comp_theo = calc_error(pi_x,theoretical_pi_x,dx)
row.append(error_comp_theo)
error.append(row)
#error = np.array(error)
elif read_opt_data:
error = pd.read_csv(opt_data_file_name, index_col=0, comment='#')
dx_grid = error.columns.to_numpy()
beta_ini_grid = error.index.to_numpy()
error = error.to_numpy()
else:
        raise Exception('Choose whether to generate or read data in optimization().')
#print(error)
error = np.array(error)
    # Replace failed Z-computation values (nan and inf) with the
    # largest finite error value in the plot.
    finite_error = error[np.isfinite(error)]
    nan_value = 1.1 * finite_error.max() if finite_error.size else 0
    error = np.nan_to_num(error, nan=nan_value, posinf=nan_value, neginf=nan_value)
script_dir = os.path.dirname(os.path.abspath(__file__))
    # Save the data (only if it was generated and saving is requested)
if generate_opt_data and save_opt_data:
if opt_data_file_name is None:
opt_data_file_name = ('pi_x-ms-opt-%s-beta_fin_%.3f'%(potential_string, beta_fin)
+ '-x_max_%.3f-nx_min_%d-nx_max_%d'%(x_max, nx_min, nx_max)
+ '-nx_sampling_%d-N_iter_min_%d'%(nx_sampling, N_iter_min)
+ '-N_iter_max_%d.csv'%(N_iter_max))
opt_data_file_name = script_dir + '/' + opt_data_file_name
relevant_info = ['Optimization of parameters dx and beta_ini of matrix squaring'
+ ' algorithm', '%s beta_fin = %.3f '%(potential_string, beta_fin)
+ 'x_max = %.3f nx_min = %d nx_max = %d '%(x_max, nx_min, nx_max)
+ 'nx_sampling = %d N_iter_min = %d '%(nx_sampling, N_iter_min)
+ 'N_iter_max = %d'%(N_iter_max)]
save_csv(error, dx_grid, beta_ini_grid, opt_data_file_name, relevant_info)
    # Plot
if plot:
fig, ax = plt.subplots(1, 1)
DX, BETA_INI = np.meshgrid(dx_grid, beta_ini_grid)
cp = plt.contourf(DX,BETA_INI,error)
plt.colorbar(cp)
ax.set_ylabel(u'$\\beta_{ini}$')
ax.set_xlabel('$dx$')
plt.tight_layout()
if save_plot:
if opt_plot_file_name is None:
opt_plot_file_name = \
('pi_x-ms-opt-plot-%s-beta_fin_%.3f'%(potential_string, beta_fin)
+ '-x_max_%.3f-nx_min_%d-nx_max_%d'%(x_max, nx_min, nx_max)
+ '-nx_sampling_%d-N_iter_min_%d'%(nx_sampling, N_iter_min)
+ '-N_iter_max_%d.eps'%(N_iter_max))
opt_plot_file_name = script_dir + '/' + opt_plot_file_name
plt.savefig(opt_plot_file_name)
if show_plot:
plt.show()
plt.close()
return error, dx_grid, beta_ini_grid
#################################################################################################
# CONTROL PANEL
#
# Decide whether to run the matrix squaring algorithm
run_ms_algorithm = False
# Decide whether to run the internal-energy calculation
run_avg_energy = False
# Decide whether to run the dx and beta_ini optimization
run_optimization = True
#
#
#################################################################################################
#################################################################################################
# GENERAL FIGURE PARAMETERS
#
# Use LaTeX for figure text and enlarge the font size
plt.rc('text', usetex=True)
plt.rcParams.update({'font.size':15,'text.latex.unicode':True})
# Get the path for saving files in the same directory as this script
script_dir = os.path.dirname(os.path.abspath(__file__))
#
#################################################################################################
#################################################################################################
# RUN THE MATRIX SQUARING ALGORITHM
#
# Physical parameters of the algorithm
x_max = 5.
nx = 201
N_iter = 7
beta_fin = 4
potential, potential_string = harmonic_potential, 'harmonic_potential'
# Technical parameters
print_steps = False
save_data = False
file_name = None
relevant_info = None
plot = True
save_plot = False
show_plot = True
if run_ms_algorithm:
rho, trace_rho, grid_x = run_pi_x_sq_trotter(x_max, nx, N_iter, beta_fin, potential,
potential_string, print_steps, save_data,
file_name, relevant_info, plot,
save_plot, show_plot)
#
#
#################################################################################################
#################################################################################################
# RUN THE INTERNAL-ENERGY CALCULATION
#
# Technical parameters for the partition function and the energy calculation
read_Z_data = False
generate_Z_data = True
Z_file_name = None
plot_energy = True
save_plot_E = True
show_plot_E = True
E_plot_name = None
# Physical parameters for computing Z and <E>
temp_min = 1./10
temp_max = 1./2
N_temp = 10
potential, potential_string = harmonic_potential, 'harmonic_potential'
# More technical parameters
save_Z_csv = True
relevant_info_Z = None
print_Z_data = False
x_max = 7.
nx = 201
N_iter = 7
print_steps = False
save_pi_x_data = False
pi_x_file_name = None
relevant_info_pi_x = None
plot_pi_x = False
save_plot_pi_x = False
show_plot_pi_x = False
if run_avg_energy:
average_energy(read_Z_data, generate_Z_data, Z_file_name, plot_energy, save_plot_E,
show_plot_E, E_plot_name,
temp_min, temp_max, N_temp, save_Z_csv, relevant_info_Z, print_Z_data,
x_max, nx, N_iter, potential, potential_string, print_steps, save_pi_x_data,
pi_x_file_name, relevant_info_pi_x,plot_pi_x, save_plot_pi_x, show_plot_pi_x)
#
#
#################################################################################################
#################################################################################################
# RUN THE dx AND beta_ini OPTIMIZATION
#
# Physical parameters
beta_fin = 4
x_max = 5
potential, potential_string = harmonic_potential, 'harmonic_potential'
nx_min = 10
nx_max = 310
nx_sampling = 60
N_iter_min = 8
N_iter_max = 20
# Technical parameters
generate_opt_data = True
read_opt_data = False
save_opt_data = True
opt_data_file_name = 'opt-test-1.csv' # '/pi_x-ms-opt-harmonic_potential-beta_fin_4.000-x_max_5.000-nx_min_10-nx_max_1001-nx_sampling_50-N_iter_min_1-N_iter_max_20.csv'
plot_opt = True
show_opt_plot = True
save_plot_opt = True
opt_plot_file_name = 'opt-test-1.png' # '/pi_x-ms-opt-plot-harmonic_potential-beta_fin_4.000-x_max_5.000-nx_min_10-nx_max_1001-nx_sampling_50-N_iter_min_1-N_iter_max_20.eps'
if run_optimization:
t_0 = time()
error, dx_grid, beta_ini_grid = \
optimization(generate_opt_data, read_opt_data, beta_fin, x_max, potential,
potential_string, nx_min, nx_max, nx_sampling, N_iter_min,
N_iter_max, save_opt_data, opt_data_file_name, plot_opt,
show_opt_plot, save_plot_opt, opt_plot_file_name)
t_1 = time()
print('-----------------------------------------'
+ '-----------------------------------------\n'
+ 'Optimization: beta_fin=%.3f, x_max=%.3f, potential=%s\n \
nx_min=%d, nx_max=%d, N_iter_min=%d, N_iter_max=%d\n \
computation time = %.3f sec.\n'%(beta_fin,x_max,potential_string,nx_min,
nx_max,N_iter_min,N_iter_max,t_1-t_0)
+ '-----------------------------------------'
+ '-----------------------------------------')
#
#
################################################################################################# | [
"[email protected]"
] | |
143015e6f533e9a7ff15567a923069a6255caf56 | 4ba0b403637e7aa3e18c9bafae32034e3c394fe4 | /python/proto/google/protobuf/internal/type_checkers.py | c88b8d028eb494464a1dd6e1420d1b256e13d83d | [] | no_license | ASMlover/study | 3767868ddae63ac996e91b73700d40595dd1450f | 1331c8861fcefbef2813a2bdd1ee09c1f1ee46d6 | refs/heads/master | 2023-09-06T06:45:45.596981 | 2023-09-01T08:19:49 | 2023-09-01T08:19:49 | 7,519,677 | 23 | 6 | null | null | null | null | UTF-8 | Python | false | false | 11,442 | py | # Protocol Buffers - Google's data interchange format
# Copyright 2008 Google Inc. All rights reserved.
# http://code.google.com/p/protobuf/
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
"""Provides type checking routines.
This module defines type checking utilities in the forms of dictionaries:
VALUE_CHECKERS: A dictionary of field types and a value validation object.
TYPE_TO_BYTE_SIZE_FN: A dictionary with field types and a size computing
function.
TYPE_TO_SERIALIZE_METHOD: A dictionary with field types and serialization
function.
FIELD_TYPE_TO_WIRE_TYPE: A dictionary with field typed and their
coresponding wire types.
TYPE_TO_DESERIALIZE_METHOD: A dictionary with field types and deserialization
function.
"""
__author__ = '[email protected] (Will Robinson)'
from google.protobuf.internal import decoder
from google.protobuf.internal import encoder
from google.protobuf.internal import wire_format
from google.protobuf.internal.utils import unicode, long
from google.protobuf import descriptor
_FieldDescriptor = descriptor.FieldDescriptor
def GetTypeChecker(cpp_type, field_type):
"""Returns a type checker for a message field of the specified types.
Args:
cpp_type: C++ type of the field (see descriptor.py).
field_type: Protocol message field type (see descriptor.py).
Returns:
An instance of TypeChecker which can be used to verify the types
of values assigned to a field of the specified type.
"""
if (cpp_type == _FieldDescriptor.CPPTYPE_STRING and
field_type == _FieldDescriptor.TYPE_BYTES):
return TypeChecker(bytes)
return _VALUE_CHECKERS[cpp_type]
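# Illustrative usage (added sketch, not part of the public protobuf API surface):
# the returned checker raises TypeError (and, for integer types, ValueError on
# out-of-range values) when a value does not fit the field.
#
#   checker = GetTypeChecker(_FieldDescriptor.CPPTYPE_INT32,
#                            _FieldDescriptor.TYPE_INT32)
#   checker.CheckValue(42)     # OK
#   checker.CheckValue('42')   # raises TypeError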
# None of the typecheckers below make any attempt to guard against people
# subclassing builtin types and doing weird things. We're not trying to
# protect against malicious clients here, just people accidentally shooting
# themselves in the foot in obvious ways.
class TypeChecker(object):
"""Type checker used to catch type errors as early as possible
when the client is setting scalar fields in protocol messages.
"""
def __init__(self, *acceptable_types):
self._acceptable_types = acceptable_types
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, self._acceptable_types):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), self._acceptable_types))
raise TypeError(message)
# IntValueChecker and its subclasses perform integer type-checks
# and bounds-checks.
class IntValueChecker(object):
"""Checker used for integer fields. Performs type-check and range check."""
def CheckValue(self, proposed_value):
if not isinstance(proposed_value, (int, long)):
message = ('%.1024r has type %s, but expected one of: %s' %
(proposed_value, type(proposed_value), (int, long)))
raise TypeError(message)
if not self._MIN <= proposed_value <= self._MAX:
raise ValueError('Value out of range: %d' % proposed_value)
class Int32ValueChecker(IntValueChecker):
# We're sure to use ints instead of longs here since comparison may be more
# efficient.
_MIN = -2147483648
_MAX = 2147483647
class Uint32ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 32) - 1
class Int64ValueChecker(IntValueChecker):
_MIN = -(1 << 63)
_MAX = (1 << 63) - 1
class Uint64ValueChecker(IntValueChecker):
_MIN = 0
_MAX = (1 << 64) - 1
# Type-checkers for all scalar CPPTYPEs.
_VALUE_CHECKERS = {
_FieldDescriptor.CPPTYPE_INT32: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_INT64: Int64ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT32: Uint32ValueChecker(),
_FieldDescriptor.CPPTYPE_UINT64: Uint64ValueChecker(),
_FieldDescriptor.CPPTYPE_DOUBLE: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_FLOAT: TypeChecker(
float, int, long),
_FieldDescriptor.CPPTYPE_BOOL: TypeChecker(bool, int),
_FieldDescriptor.CPPTYPE_ENUM: Int32ValueChecker(),
_FieldDescriptor.CPPTYPE_STRING: TypeChecker(unicode),
}
# Map from field type to a function F, such that F(field_num, value)
# gives the total byte size for a value of the given type. This
# byte size includes tag information and any other additional space
# associated with serializing "value".
TYPE_TO_BYTE_SIZE_FN = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.DoubleByteSize,
_FieldDescriptor.TYPE_FLOAT: wire_format.FloatByteSize,
_FieldDescriptor.TYPE_INT64: wire_format.Int64ByteSize,
_FieldDescriptor.TYPE_UINT64: wire_format.UInt64ByteSize,
_FieldDescriptor.TYPE_INT32: wire_format.Int32ByteSize,
_FieldDescriptor.TYPE_FIXED64: wire_format.Fixed64ByteSize,
_FieldDescriptor.TYPE_FIXED32: wire_format.Fixed32ByteSize,
_FieldDescriptor.TYPE_BOOL: wire_format.BoolByteSize,
_FieldDescriptor.TYPE_STRING: wire_format.StringByteSize,
_FieldDescriptor.TYPE_GROUP: wire_format.GroupByteSize,
_FieldDescriptor.TYPE_MESSAGE: wire_format.MessageByteSize,
_FieldDescriptor.TYPE_BYTES: wire_format.BytesByteSize,
_FieldDescriptor.TYPE_UINT32: wire_format.UInt32ByteSize,
_FieldDescriptor.TYPE_ENUM: wire_format.EnumByteSize,
_FieldDescriptor.TYPE_SFIXED32: wire_format.SFixed32ByteSize,
_FieldDescriptor.TYPE_SFIXED64: wire_format.SFixed64ByteSize,
_FieldDescriptor.TYPE_SINT32: wire_format.SInt32ByteSize,
_FieldDescriptor.TYPE_SINT64: wire_format.SInt64ByteSize
}
# Maps from field types to encoder constructors.
TYPE_TO_ENCODER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleEncoder,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatEncoder,
_FieldDescriptor.TYPE_INT64: encoder.Int64Encoder,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Encoder,
_FieldDescriptor.TYPE_INT32: encoder.Int32Encoder,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Encoder,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Encoder,
_FieldDescriptor.TYPE_BOOL: encoder.BoolEncoder,
_FieldDescriptor.TYPE_STRING: encoder.StringEncoder,
_FieldDescriptor.TYPE_GROUP: encoder.GroupEncoder,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageEncoder,
_FieldDescriptor.TYPE_BYTES: encoder.BytesEncoder,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Encoder,
_FieldDescriptor.TYPE_ENUM: encoder.EnumEncoder,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Encoder,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Encoder,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Encoder,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Encoder,
}
# Maps from field types to sizer constructors.
TYPE_TO_SIZER = {
_FieldDescriptor.TYPE_DOUBLE: encoder.DoubleSizer,
_FieldDescriptor.TYPE_FLOAT: encoder.FloatSizer,
_FieldDescriptor.TYPE_INT64: encoder.Int64Sizer,
_FieldDescriptor.TYPE_UINT64: encoder.UInt64Sizer,
_FieldDescriptor.TYPE_INT32: encoder.Int32Sizer,
_FieldDescriptor.TYPE_FIXED64: encoder.Fixed64Sizer,
_FieldDescriptor.TYPE_FIXED32: encoder.Fixed32Sizer,
_FieldDescriptor.TYPE_BOOL: encoder.BoolSizer,
_FieldDescriptor.TYPE_STRING: encoder.StringSizer,
_FieldDescriptor.TYPE_GROUP: encoder.GroupSizer,
_FieldDescriptor.TYPE_MESSAGE: encoder.MessageSizer,
_FieldDescriptor.TYPE_BYTES: encoder.BytesSizer,
_FieldDescriptor.TYPE_UINT32: encoder.UInt32Sizer,
_FieldDescriptor.TYPE_ENUM: encoder.EnumSizer,
_FieldDescriptor.TYPE_SFIXED32: encoder.SFixed32Sizer,
_FieldDescriptor.TYPE_SFIXED64: encoder.SFixed64Sizer,
_FieldDescriptor.TYPE_SINT32: encoder.SInt32Sizer,
_FieldDescriptor.TYPE_SINT64: encoder.SInt64Sizer,
}
# Maps from field type to a decoder constructor.
TYPE_TO_DECODER = {
_FieldDescriptor.TYPE_DOUBLE: decoder.DoubleDecoder,
_FieldDescriptor.TYPE_FLOAT: decoder.FloatDecoder,
_FieldDescriptor.TYPE_INT64: decoder.Int64Decoder,
_FieldDescriptor.TYPE_UINT64: decoder.UInt64Decoder,
_FieldDescriptor.TYPE_INT32: decoder.Int32Decoder,
_FieldDescriptor.TYPE_FIXED64: decoder.Fixed64Decoder,
_FieldDescriptor.TYPE_FIXED32: decoder.Fixed32Decoder,
_FieldDescriptor.TYPE_BOOL: decoder.BoolDecoder,
_FieldDescriptor.TYPE_STRING: decoder.StringDecoder,
_FieldDescriptor.TYPE_GROUP: decoder.GroupDecoder,
_FieldDescriptor.TYPE_MESSAGE: decoder.MessageDecoder,
_FieldDescriptor.TYPE_BYTES: decoder.BytesDecoder,
_FieldDescriptor.TYPE_UINT32: decoder.UInt32Decoder,
_FieldDescriptor.TYPE_ENUM: decoder.EnumDecoder,
_FieldDescriptor.TYPE_SFIXED32: decoder.SFixed32Decoder,
_FieldDescriptor.TYPE_SFIXED64: decoder.SFixed64Decoder,
_FieldDescriptor.TYPE_SINT32: decoder.SInt32Decoder,
_FieldDescriptor.TYPE_SINT64: decoder.SInt64Decoder,
}
# Maps from field type to expected wiretype.
FIELD_TYPE_TO_WIRE_TYPE = {
_FieldDescriptor.TYPE_DOUBLE: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FLOAT: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_INT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_UINT64: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_INT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_FIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_FIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_BOOL: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_STRING:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_GROUP: wire_format.WIRETYPE_START_GROUP,
_FieldDescriptor.TYPE_MESSAGE:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_BYTES:
wire_format.WIRETYPE_LENGTH_DELIMITED,
_FieldDescriptor.TYPE_UINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_ENUM: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SFIXED32: wire_format.WIRETYPE_FIXED32,
_FieldDescriptor.TYPE_SFIXED64: wire_format.WIRETYPE_FIXED64,
_FieldDescriptor.TYPE_SINT32: wire_format.WIRETYPE_VARINT,
_FieldDescriptor.TYPE_SINT64: wire_format.WIRETYPE_VARINT,
}
| [
"[email protected]"
] | |
001b2620fba71bf6672fffa7ba7578a6ef07180f | c838b0eaf08c63284bd29442f8a0a297d1558fd5 | /lagom/runner/rolling_segment_runner.py | 6f42c595b4aa4688010ec63e73d889e6d6601428 | [
"MIT"
] | permissive | vin136/lagom | ccd0f4a3e469c1ee8ef88b1f5248e712b51c5704 | 54e1890e6450f4b1bf499a838963c5d1a3b2da6a | refs/heads/master | 2020-04-22T21:45:51.488458 | 2019-02-13T16:41:32 | 2019-02-13T16:41:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,630 | py | import torch
import numpy as np
from lagom.envs import EnvSpec
from lagom.history import BatchSegment
from lagom.runner import BaseRunner
class RollingSegmentRunner(BaseRunner):
def __init__(self, config, agent, env):
super().__init__(config, agent, env)
self.env_spec = EnvSpec(self.env)
self.obs_buffer = None # for next call
self.done_buffer = None # masking
def __call__(self, T, reset=False):
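        """Collect a batch segment of T time steps from the environment.
        Added descriptive note: unlike an episode-based runner, this rolling
        runner keeps the last observation and done flags between calls, so
        successive calls continue the same trajectories unless ``reset=True``
        (which also resets the agent).
        """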
D = BatchSegment(self.env_spec, T)
if self.obs_buffer is None or reset:
obs = self.env.reset()
# reset agent: e.g. RNN states because initial observation
self.agent.reset(self.config)
else:
obs = self.obs_buffer
D.add_observation(0, obs)
for t in range(T):
info = {}
out_agent = self.agent.choose_action(obs, info=info)
action = out_agent.pop('action')
if torch.is_tensor(action):
raw_action = list(action.detach().cpu().numpy())
else:
raw_action = action
D.add_action(t, raw_action)
obs, reward, done, info = self.env.step(raw_action)
D.add_observation(t+1, obs)
D.add_reward(t, reward)
D.add_done(t, done)
D.add_info(info)
# Record other information: e.g. log-probability of action, policy entropy
D.add_batch_info(out_agent)
self.obs_buffer = obs
self.done_buffer = done
return D
| [
"[email protected]"
] | |
f32c47e27a4ed6dfd41d6154529bbafd699be8a3 | a38eb2457fc834109cbb1d5cd2f5649d932cbb00 | /blog/migrations/0001_initial.py | 7115d435920cd0178d44e08eebf29f71a8ba7649 | [] | no_license | anitalmada/mi-primer-blog | 0a5daef6aa87273ab4ab09c1f6d1ff51706fdae2 | 872709baeef37c30608231a746d749c9d11afb0e | refs/heads/master | 2020-06-10T01:47:37.291475 | 2017-10-09T00:16:38 | 2017-10-09T00:16:38 | 76,130,283 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,049 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-12-10 15:47
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Posts',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('text', models.TextField()),
('created_date', models.DateTimeField(default=django.utils.timezone.now)),
('published_date', models.DateTimeField(blank=True, null=True)),
('author', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"[email protected]"
] | |
fa3c81e8655206c62c0e1c0548c42cc70ab771ae | c1261b9181d86c418df612dc809af933cfbb2c0d | /blog1/migrations/0003_auto_20190616_1338.py | c8a7d136c991ab23980daf09e24fe9de1d3d62e1 | [] | no_license | gitlGl/myblog | 122a598407d12a7397420ce50f9c1ca68a3107d2 | b3d7d1130e81ca625cb9d2b7204e19da6efe7d07 | refs/heads/master | 2023-09-01T14:06:04.720407 | 2022-10-22T08:47:02 | 2022-10-22T08:47:02 | 198,171,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 821 | py | # Generated by Django 2.1 on 2019-06-16 05:38
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('blog1', '0002_auto_20190616_1334'),
]
operations = [
migrations.CreateModel(
name='Page',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(default='title', max_length=20)),
('content', models.TextField()),
],
),
migrations.RemoveField(
model_name='students',
name='sgrade',
),
migrations.DeleteModel(
name='Grades',
),
migrations.DeleteModel(
name='Students',
),
]
| [
"[email protected]"
] | |
21d731df849e297716c68f2ae4a7a7a0777d9eed | 2d9a3ce2a04190d0032e8a298829022260b1d76b | /indra/preassembler/refinement.py | 25da61e0baae4308ffbcc5076b86d3d1879d0344 | [
"BSD-2-Clause",
"BSD-2-Clause-Views"
] | permissive | sorgerlab/indra | f127a0f9bdd2d3f48df14575883fd31e2f4de4bf | 6d6ca1174792b6c5a05cbf3afcb9f138fabcec6a | refs/heads/master | 2023-08-21T13:25:54.654995 | 2023-06-11T16:46:41 | 2023-06-11T16:46:41 | 22,848,436 | 158 | 61 | BSD-2-Clause | 2023-08-30T21:47:59 | 2014-08-11T17:44:05 | Python | UTF-8 | Python | false | false | 17,262 | py | """This module implements classes and functions that are used for
finding refinements between INDRA Statements as part of the
knowledge-assembly process. These are imported by the preassembler
module."""
__all__ = ['get_agent_key', 'get_relevant_keys', 'RefinementFilter',
'RefinementConfirmationFilter', 'OntologyRefinementFilter',
'SplitGroupFilter', 'default_refinement_fun']
import time
import logging
import collections
from indra.statements import Event
from indra.statements import stmt_type as indra_stmt_type
logger = logging.getLogger(__name__)
# TODO: we could make the agent key function parameterizable with the
# preassembler to allow custom agent mappings to the ontology.
def get_agent_key(agent):
"""Return a key for an Agent for use in refinement finding.
Parameters
----------
agent : indra.statements.Agent or None
An INDRA Agent whose key should be returned.
Returns
-------
tuple or None
The key that maps the given agent to the ontology, with special
handling for ungrounded and None Agents.
"""
if isinstance(agent, Event):
agent = agent.concept
if agent is None:
agent_key = None
else:
agent_key = agent.get_grounding()
if not agent_key[0]:
agent_key = ('NAME', agent.name)
return agent_key
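# Illustrative examples (hypothetical groundings): an Agent grounded to FamPlex
# would yield a key like ('FPLX', 'MEK'); an ungrounded Agent falls back to
# ('NAME', <agent name>); and a None Agent maps to None.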
def get_relevant_keys(agent_key, all_keys_for_role, ontology, direction):
"""Return relevant agent keys for an agent key for refinement finding.
Parameters
----------
agent_key : tuple or None
An agent key of interest.
all_keys_for_role : set
The set of all agent keys in a given statement corpus with a
role matching that of the given agent_key.
ontology : indra.ontology.IndraOntology
An IndraOntology instance with respect to which relevant other
agent keys are found for the purposes of refinement.
direction: str
The direction in which to find relevant agents. The two options
are 'less_specific' and 'more_specific' for agents that are less and
more specific, per the ontology, respectively.
Returns
-------
set
The set of relevant agent keys which this given agent key can
possibly refine.
"""
rel_fun = ontology.get_parents if direction == 'less_specific' else \
ontology.get_children
relevant_keys = {None, agent_key}
if agent_key is not None:
relevant_keys |= set(rel_fun(*agent_key))
relevant_keys &= all_keys_for_role
return relevant_keys
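# Sketch of the direction semantics (hypothetical corpus): for an agent_key of
# ('FPLX', 'MEK') with direction='less_specific', the relevant keys are the
# ontology parents of MEK restricted to all_keys_for_role, together with
# {None, ('FPLX', 'MEK')} itself; with 'more_specific' the children are used instead.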
class RefinementFilter:
"""A filter which is applied to one or more statements to eliminate
candidate refinements that are not possible according to some
criteria. By applying a series of such filters, the preassembler can avoid
doing n-by-n comparisons to determine refinements among n statements.
The filter class can take any number of constructor arguments that it
needs to perform its task. The base class' constructor initializes
a shared_data attribute as an empty dict.
It also needs to implement an initialize function which is called
with a stmts_by_hash argument, containing a dict of statements keyed by
hash. This function can build any data structures that may be needed
    to efficiently apply the filter later. It can store any
such data structures in the shared_data dict to be accessed by
other functions later.
Finally, the class needs to implement a get_related function, which
takes a single INDRA Statement as input to return the hashes of
potentially related other statements that the filter was initialized
with. The function also needs to take a possibly_related argument
which is either None (no other filter was run before) or a set,
which is the superset of possible relations as determined by some
other previously applied filter.
"""
def __init__(self):
self.shared_data = {}
def initialize(self, stmts_by_hash):
"""Initialize the filter class with a set of statements.
The filter can build up some useful data structures in this
function before being applied to any specific statements.
Parameters
----------
stmts_by_hash : dict[int, indra.statements.Statement]
A dict of statements keyed by their hashes.
"""
self.shared_data['stmts_by_hash'] = stmts_by_hash
def get_related(self, stmt, possibly_related=None,
direction='less_specific'):
"""Return a set of statement hashes that a given statement is
potentially related to.
Parameters
----------
stmt : indra.statements.Statement
The INDRA statement whose potential relations we want to filter.
possibly_related : set or None
A set of statement hashes that this statement is potentially
related to, as determined by some other filter. If this parameter
is a set (including an empty set), this function should return
a subset of it (intuitively, this filter can only further eliminate
some of the potentially related hashes that were previously
determined to be potential relations). If this argument is
None, the function must assume that no previous filter
was run before, and should therefore return all the possible
relations that it determines.
direction : str
            One of 'less_specific' or 'more_specific'. Since refinements
are directed relations, this function can operate in two
different directions: it can either find less specific
            potentially related statements, or it can find more specific
potentially related statements, as determined by this argument.
Returns
-------
set of int
A set of INDRA Statement hashes that are potentially related
to the given statement.
"""
raise NotImplementedError('The filter class has to implement a'
'get_related method.')
def get_more_specifics(self, stmt, possibly_related=None):
"""Return a set of hashes of statements that are potentially related
and more specific than the given statement."""
return self.get_related(stmt, possibly_related=possibly_related,
direction='more_specific')
def get_less_specifics(self, stmt, possibly_related=None):
"""Return a set of hashes of statements that are potentially related
and less specific than the given statement."""
return self.get_related(stmt, possibly_related=possibly_related,
direction='less_specific')
def extend(self, stmts_by_hash):
"""Extend the initial data structures with a set of new statements.
Parameters
----------
stmts_by_hash : dict[int, indra.statements.Statement]
A dict of statements keyed by their hashes.
"""
# We can assume that these stmts_by_hash are unique
self.shared_data['stmts_by_hash'].update(stmts_by_hash)
class OntologyRefinementFilter(RefinementFilter):
"""This filter uses an ontology to position statements and their agents
to filter down significantly on the set of possible relations for
a given statement.
Parameters
----------
ontology : indra.ontology.OntologyGraph
An INDRA ontology graph.
"""
def __init__(self, ontology):
super().__init__()
self.ontology = ontology
def initialize(self, stmts_by_hash):
self.shared_data['stmts_by_hash'] = {}
self.extend(stmts_by_hash)
def extend(self, stmts_by_hash):
self.shared_data['stmts_by_hash'].update(stmts_by_hash)
# Build up data structure of statement hashes by
# statement type
stmts_by_type = collections.defaultdict(set)
for stmt_hash, stmt in stmts_by_hash.items():
stmts_by_type[indra_stmt_type(stmt)].add(stmt_hash)
stmts_by_type = dict(stmts_by_type)
# Now iterate over each statement type and build up
# data structures for quick filtering
for stmt_type, stmts_this_type in stmts_by_type.items():
# Step 1. initialize data structures
# noinspection PyProtectedMember
roles = stmts_by_hash[next(iter(stmts_this_type))]._agent_order
if stmt_type not in self.shared_data:
self.shared_data[stmt_type] = {}
# Mapping agent keys to statement hashes
self.shared_data[stmt_type]['agent_key_to_hash'] = \
{role: collections.defaultdict(set) for role in roles}
# Mapping statement hashes to agent keys
self.shared_data[stmt_type]['hash_to_agent_key'] = \
{role: collections.defaultdict(set) for role in roles}
# All agent keys for a given agent role
self.shared_data[stmt_type]['all_keys_by_role'] = {}
# Step 2. Fill up the initial data structures in preparation
# for identifying potential refinements
for sh in stmts_this_type:
for role in roles:
agent_keys = self._agent_keys_for_stmt_role(
stmts_by_hash[sh], role)
for agent_key in agent_keys:
self.shared_data[stmt_type]['agent_key_to_hash'][
role][agent_key].add(sh)
self.shared_data[stmt_type]['hash_to_agent_key'][
role][sh].add(agent_key)
for role in roles:
self.shared_data[stmt_type]['all_keys_by_role'][role] = \
set(self.shared_data[stmt_type]['agent_key_to_hash'][role])
@staticmethod
def _agent_keys_for_stmt_role(stmt, role):
"""Return a set of agent keys for a statement's agent in a role.
        The agent key is an "anchor" to the ontology being used and positions
        a statement, via its agent in this role, against other statements it
may be related to.
"""
agents = getattr(stmt, role)
        # Handle a special case here where a list-like agent
# role can be empty, here we will consider anything else
# to be a refinement, hence add a None key
if isinstance(agents, list) and not agents:
agent_keys = {None}
# Generally, we take all the agent keys for a single or
# list-like agent role.
else:
agent_keys = {get_agent_key(agent) for agent in
(agents if isinstance(agents, list)
else [agents])}
return agent_keys
def get_related(self, stmt, possibly_related=None,
direction='less_specific'):
# Corner case: if this is a new statement that wasn't part of the
# initialization, it is possible that it has a type that we've not
# seen during initialization at all. In this case, we can assume
# there are no refinements for it.
stmt_type = indra_stmt_type(stmt)
if stmt_type not in self.shared_data:
return {}
        # Step 1. Recover relevant parts of the initialized data
hash_to_agent_key = self.shared_data[stmt_type]['hash_to_agent_key']
agent_key_to_hash = self.shared_data[stmt_type]['agent_key_to_hash']
all_keys_by_role = self.shared_data[stmt_type]['all_keys_by_role']
# Step 2. We iterate over all statements and find ones that this one
# can refine
stmt_hash = stmt.get_hash()
relevants = possibly_related
# We now iterate over all the agent roles in the given statement
# type
for role, hash_to_agent_key_for_role in hash_to_agent_key.items():
# If we have seen this statement before during initialization then
# we can use its precalculated agent keys, otherwise we
# calculate new agent keys for it.
if stmt_hash in hash_to_agent_key_for_role:
agent_keys = hash_to_agent_key_for_role[stmt_hash]
else:
agent_keys = self._agent_keys_for_stmt_role(stmt, role)
# We get all the agent keys in all other statements that the
# agent in this given role in this statement can refine.
for agent_key in agent_keys:
relevant_keys = get_relevant_keys(
agent_key,
all_keys_by_role[role],
self.ontology,
direction=direction)
                # We now get the actual statement hashes in which these other
                # potentially refined agent keys appear in the given role
role_relevant_stmt_hashes = set.union(
*[agent_key_to_hash[role][rel]
for rel in relevant_keys]) - {stmt_hash}
# In the first iteration, we initialize the set with the
# relevant statement hashes
if relevants is None:
relevants = role_relevant_stmt_hashes
# In subsequent iterations, we take the intersection of
# the relevant sets per role
else:
relevants &= role_relevant_stmt_hashes
# These hashes are now the ones that this statement needs
# to be compared against. Importantly, the relationship is in
# a well-defined direction so we don't need to test both ways.
return relevants
class RefinementConfirmationFilter(RefinementFilter):
"""This class runs the refinement function between potentially
    related statements to confirm whether they are indeed, conclusively,
    in a refinement relationship with each other.
In this sense, this isn't a real filter, though implementing it
as one is convenient. This filter is meant to be used as the final
component in a series of pre-filters.
"""
def __init__(self, ontology, refinement_fun=None):
self.ontology = ontology
self.refinement_fun = refinement_fun if refinement_fun else \
default_refinement_fun
self.shared_data = {}
self.comparison_counter = 0
def get_related(self, stmt, possibly_related=None,
direction='less_specific'):
stmts_by_hash = self.shared_data['stmts_by_hash']
relateds = set()
# We use the previously constructed set of statements that this one
# can possibly refine
for possible_related_hash in possibly_related:
more_spec_stmt, less_spec_stmt = (
(stmt, stmts_by_hash[possible_related_hash])
if direction == 'less_specific'
else (stmts_by_hash[possible_related_hash], stmt)
)
# And then do the actual comparison. Here we use
# entities_refined=True which means that we assert that
# the entities, in each role, are already confirmed to
# be "compatible" for refinement, and therefore, we
# don't need to again confirm this (i.e., call "isa") in
# the refinement_of function.
ref = self.refinement_fun(
more_spec_stmt,
less_spec_stmt,
ontology=self.ontology,
# NOTE: here we assume that the entities at this point
# are definitely refined due to the use of an
# ontology-based pre-filter. If this is not the case
# for some reason then it is the responsibility of the
# user-supplied self.refinement_fun to disregard the
# entities_refined argument.
entities_refined=True)
self.comparison_counter += 1
if ref:
relateds.add(possible_related_hash)
return relateds
class SplitGroupFilter(RefinementFilter):
"""This filter implements splitting statements into two groups and
only considering refinement relationships between the groups but not
within them."""
def __init__(self, split_groups):
super().__init__()
self.split_groups = split_groups
def get_related(self, stmt, possibly_related=None,
direction='less_specific'):
sh = stmt.get_hash()
group = self.split_groups.get(sh)
        # We return the hashes that are in a different group, drawing from
        # all statements if possibly_related is None (i.e., there was no
        # previous filter), or only from the previously identified
        # candidates otherwise.
possibly_related_prior = possibly_related \
if possibly_related is not None \
else self.shared_data['stmts_by_hash']
related = {stmt_hash for stmt_hash in possibly_related_prior
if self.split_groups[stmt_hash] != group}
return related
def default_refinement_fun(st1, st2, ontology, entities_refined):
return st1.refinement_of(st2, ontology, entities_refined)
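# Illustrative usage sketch: chaining the ontology-based pre-filter with the
# confirmation filter. It assumes `ontology` is an indra.ontology.OntologyGraph
# and `stmts` is a list of INDRA Statements, and that the base class
# initialize() populates shared_data['stmts_by_hash'] the way extend() implies.
#
#     stmts_by_hash = {stmt.get_hash(): stmt for stmt in stmts}
#     filters = [OntologyRefinementFilter(ontology),
#                RefinementConfirmationFilter(ontology)]
#     for fltr in filters:
#         fltr.initialize(stmts_by_hash)
#     # Narrow down candidates first, then confirm actual refinements
#     possibly_related = None
#     for fltr in filters:
#         possibly_related = fltr.get_related(
#             stmts[0], possibly_related=possibly_related)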
| [
"[email protected]"
] | |
55f1dc9325861a9e19d2e9a887425c689284659e | 0c6c7365d6ff8b694bc906ec5f74c741e8bb0d37 | /Algorithms/922-Sort-Array-By-Parity-II.py | b95848daeaa9b922111c67403417c1599a5222c5 | [] | no_license | XiongQiuQiu/leetcode-slove | d58ab90caa250c86b7a1ade8b60c669821d77995 | 60f0da57b8ea4bfb937e2fe0afe3caea719cd7e4 | refs/heads/master | 2021-01-23T11:21:15.069080 | 2019-07-08T15:42:48 | 2019-07-08T15:42:48 | 93,133,558 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 775 | py | '''
Given an array A of non-negative integers, half of the integers in A are odd, and half of the integers are even.
Sort the array so that whenever A[i] is odd, i is odd; and whenever A[i] is even, i is even.
You may return any answer array that satisfies this condition.
Example 1:
Input: [4,2,5,7]
Output: [4,5,2,7]
Explanation: [4,7,2,5], [2,5,4,7], [2,7,4,5] would also have been accepted.
Note:
2 <= A.length <= 20000
A.length % 2 == 0
0 <= A[i] <= 1000
'''
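# Approach used below: split A into its odd and even values, then rebuild the
# answer by popping an even value at each even index and an odd value at each
# odd index; O(n) time and O(n) extra space.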
class Solution(object):
def sortArrayByParityII(self, A):
"""
:type A: List[int]
:rtype: List[int]
"""
odds,evens = [odd for odd in A if odd %2],[even for even in A if even%2 == 0]
return [odds.pop() if i % 2 else evens.pop() for i in range(len(A)) ] | [
"[email protected]"
] | |
1eb7553184f9f93b4f42a7d94f77117f5205d59e | 78171e8cfbc44c547ee07d6e5a85e595fb7397a1 | /analytics/migrations/0001_initial.py | c0e9b067863b881a1c5ab3492b2f8dbcc91c19cf | [] | no_license | jimpalowski/URLshortener | 37b41a3818679c1e0707f02f57147e87a651063c | f7b8450ce2e858dff1e6fec11f9fd5dfec3d3e26 | refs/heads/master | 2021-09-05T11:44:36.564719 | 2018-01-27T01:59:56 | 2018-01-27T01:59:56 | 118,685,038 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 896 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2018-01-26 19:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('shortener', '0003_auto_20180125_2359'),
]
operations = [
migrations.CreateModel(
name='ClickEvent',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField(default=0)),
('updated', models.DateTimeField(auto_now=True)),
('timestamp', models.DateTimeField(auto_now_add=True)),
('kirr_url', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='shortener.KirrURL')),
],
),
]
| [
"[email protected]"
] | |
037914c9c349ecf834267d3eb3e6e5b20c208d0b | 50dd2a43daa8316fc11e0c176b5872738fcc5dde | /Learning/049_Decorators/deco.py | a73431eabd8fc058356752fa444fc38ffd559521 | [] | no_license | FrenchBear/Python | 58204d368e3e72071eef298ff00d06ff51bd7914 | b41ab4b6a59ee9e145ef2cd887a5fe306973962b | refs/heads/master | 2023-08-31T18:43:37.792427 | 2023-08-26T15:53:20 | 2023-08-26T15:53:20 | 124,466,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,383 | py | # deco.py
# Exercices on decorators
#
# 2018-09-03 PV
# 2018-09-07 PV Variant with a class
# 2018-10-01 PV Expanded with execution time and following code
import functools
def uppercase(func):
# Preserve __name__ and __doc__ of original function in the decorated version
@functools.wraps(func)
def wrapper(*args, **kwargs):
original_result = func(*args, **kwargs)
modified_result = original_result.upper()
return modified_result
return wrapper
def strong(func):
def wrapper():
return '<strong>' + func() + '</strong>'
return wrapper
def emphasis(func):
def wrapper():
return '<em>' + func() + '</em>'
return wrapper
@strong
@emphasis
@uppercase
def greet():
return 'Hello!'
print(greet())
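# Note: decorators apply bottom-up, so greet() is uppercased first, then
# wrapped in <em>, then in <strong>; the call above prints
# '<strong><em>HELLO!</em></strong>'.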
@uppercase
def hello(name: str) -> str:
"""This is a polite function to say hello"""
return 'Hello '+name+'!'
print(hello('Pierre'))
print("name:", hello.__name__)
print("doc:", hello.__doc__)
def trace(func):
def wrapper(*args, **kwargs):
print(f'TRACE: calling {func.__name__}() with {args}, {kwargs}')
original_result = func(*args, **kwargs)
print(f'TRACE: {func.__name__}() returned {original_result!r}')
return original_result
return wrapper
@trace
def say(name, line):
return f'{name}: {line}'
print(say('Jane', 'Hello, World'))
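# Note: the call above first prints the two TRACE lines from the wrapper
# (the arguments, then the repr of the returned value) and finally
# 'Jane: Hello, World' from the outer print().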
# Variant, with a class
# While a function embedded in a function gets a closure over outer function parameters, there is
# no such thing here, and self.original is a "manual closure"
class SkipLines():
def __init__(self, n):
self.n = n
self.original = None
# Callable that handles registration
def __call__(self, f):
self.original = f
return self.relay
def relay(self, *args, **kwargs):
for _ in range(self.n):
print('-'*20)
self.original(*args, **kwargs)
for _ in range(self.n):
print('-'*20)
@SkipLines(2)
def PrintHello(n):
print("Hello,", n)
PrintHello("Pierre")
# ----------------------------------------------
# Decorator to output running time of a function
# Use @functools.wraps(func) to preserve __name__ and __doc__ of decorated function
import time
import functools
print("\nMeasuring execution time")
def clock(func):
@functools.wraps(func)
def clocked(*args, **kwargs):
t0 = time.perf_counter()
result = func(*args, **kwargs)
elapsed = time.perf_counter() - t0
name = func.__name__
arg_lst = []
if args:
arg_lst.append(', '.join(repr(arg) for arg in args))
if kwargs:
pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())]
arg_lst.append(', '.join(pairs))
arg_str = ', '.join(arg_lst)
print('[%0.8fs] %s(%s) -> %r ' % (elapsed, name, arg_str, result))
return result
return clocked
# Second version, parameterizable: decorator is a function returning a decorator!
DEFAULT_FMT = '[{elapsed:0.8f}s] {name}({args}) -> {result}'
def clock2(fmt=DEFAULT_FMT):
def decorate(func):
@functools.wraps(func)
def clocked(*posargs, **kwargs):
t0 = time.perf_counter()
result = func(*posargs, **kwargs)
elapsed = time.perf_counter() - t0
name = func.__name__
arg_lst = []
if posargs:
arg_lst.append(', '.join(repr(arg) for arg in posargs))
if kwargs:
pairs = ['%s=%r' % (k, w) for k, w in sorted(kwargs.items())]
arg_lst.append(', '.join(pairs))
args = ', '.join(arg_lst)
print(fmt.format(**locals())) # locals() is a dictionary of local variables
return result
return clocked
return decorate
@clock
def snooze(seconds):
time.sleep(seconds)
@clock2()
def factorial(n):
return 1 if n < 2 else n*factorial(n-1)
print('Calling {}(0.25)'.format(snooze.__name__))
snooze(0.25)
print('Calling factorial(6)')
f6 = factorial(n=6)
# ----------------------------------------------
# Use of @functools.lru_cache() to implement a cache of recent calls to avoid executing again
# Artificial but impressive example
print("\[email protected]_cache()")
@clock
def fibo1(n):
if n < 2:
return n
return fibo1(n-2) + fibo1(n-1)
print('calling fibo1(6)')
print(fibo1(6))
@functools.lru_cache()
@clock
def fibo2(n):
if n < 2:
return n
return fibo2(n-2) + fibo2(n-1)
print('calling fibo2(6)')
print(fibo2(6))
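# lru_cache also exposes hit/miss statistics, which makes the effect of the
# cache visible: with memoization each fibo2(n) is computed only once.
print(fibo2.cache_info())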
# ----------------------------------------------
# Use of singledispatch to provide 'overrides' on 1st parameter type
import math
import scipy.special  # type: ignore
print("\[email protected]")
@functools.singledispatch
def generalized_factorial(obj):
raise ValueError()
@generalized_factorial.register(int)
def fact_i(n):
# print('fact_i')
return 1 if n < 2 else n*fact_i(n-1)
@generalized_factorial.register(float)
def fact_f(x):
# print('fact_f')
return math.gamma(x+1)
@generalized_factorial.register(complex)
def fact_c(x):
return scipy.special.gamma(x+1)
print('3! =', generalized_factorial(3))
print('3.5! =', generalized_factorial(3.5))
print('4! =', generalized_factorial(4))
print('(4+0.01j)! =', generalized_factorial(4+0.01j))
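# Note: a type with no registered implementation falls through to the default
# generalized_factorial(obj) and raises ValueError (e.g. a str argument),
# while bool values dispatch to the int implementation via the MRO.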
| [
"[email protected]"
] | |
1650ea5291fb078028b18bc7b1d33ce095acb1aa | 3fce653f12af1b98b6b87e3d87e7d10483ef6fac | /hood/migrations/0003_auto_20181018_1012.py | f514eafff572aa923446e6983049b97236ec7ec9 | [] | no_license | IreriVIkki/hoodip | 6dba100d75a69b0dd146205557cbaba4ec2555e2 | 610629d6a54920e66b7e30156b11887de7fe8db4 | refs/heads/master | 2020-04-01T14:31:31.729134 | 2018-10-23T11:27:31 | 2018-10-23T11:27:31 | 153,297,767 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 403 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-10-18 07:12
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('hood', '0002_auto_20181018_0936'),
]
operations = [
migrations.RenameModel(
old_name='NeighbourHood',
new_name='NeighborHood',
),
]
| [
"[email protected]"
] | |
ee0d03e5ed294173e0df5f582729e2d0b61ef73f | 17926b196d9db43816453d16f3da84de6664f2fd | /155_Mini_Stack.py | 76a37a32f326092b1b0ac94945275b9c1a9c62f7 | [] | no_license | luchang59/leetcode | 66690a3c9b28a5201a7be8cd0134142b48418adb | feab001b9291f6e57c44eeb0b625fdaa145d19b4 | refs/heads/master | 2020-05-28T06:57:20.667138 | 2019-09-20T18:18:11 | 2019-09-20T18:18:11 | 188,914,681 | 0 | 0 | null | 2019-05-27T22:17:44 | 2019-05-27T22:08:54 | null | UTF-8 | Python | false | false | 717 | py | class MinStack:
def __init__(self):
"""
initialize your data structure here.
"""
self.stack = []
def push(self, x):
"""
:type x: int
:rtype: None
"""
curMin = self.getMin()
if curMin == None or x < curMin:
curMin = x
self.stack.append((x, curMin))
def pop(self):
"""
:rtype: None
"""
self.stack.pop()
def top(self):
"""
:rtype: int
"""
return self.stack[-1][0] if self.stack else None
def getMin(self):
"""
:rtype: int
"""
return self.stack[-1][1] if self.stack else None
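# Minimal usage sketch (illustrative, not part of the original solution):
if __name__ == '__main__':
    s = MinStack()
    s.push(-2)
    s.push(0)
    s.push(-3)
    print(s.getMin())  # -3
    s.pop()
    print(s.top())     # 0
    print(s.getMin())  # -2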
| [
"[email protected]"
] | |
b07ebe4341d3f201e7ded401c8ae97e2d1385731 | cbe264842df4eae3569b28ed4aae9489014ed23c | /python/coding_dojang/judge_regular_expression.py | 5418ef520574bac18ef5149097b2d76c3df53c2e | [
"MIT"
] | permissive | zeroam/TIL | 31e176c2f4c3e1ef72b1155353690cc2f7160f96 | 43e3573be44c7f7aa4600ff8a34e99a65cbdc5d1 | refs/heads/master | 2021-07-23T01:43:34.135033 | 2021-07-10T06:47:17 | 2021-07-10T06:47:17 | 167,952,375 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 617 | py | import re
p = re.compile(r'^(http(s)?://)?[\w\-]+\.[\w\-]+[\w\-_.?=/&#:]+$')
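# The pattern accepts an optional http:// or https:// scheme, then at least
# two dot-separated labels of word characters/hyphens, then one or more
# characters typical of paths, queries, fragments or ports ([\w\-_.?=/&#:]).
# Spaces never match, so the pipe-separated double URL below should fail.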
urls = [
'https://www.example.com',
'http://www.example.com',
'www.example.com',
'example.com',
'http://blog.example.com',
'http://www.example.com/product',
'http://www.example.com/products?id=1&page=2',
'http://www.example.com#up',
'http://255.255.255.255',
'255.255.255.255',
'http://invalid.com/perl.cgi?key= | http://web-site.com/cgi-bin/perl.cgi?key1=value1&key2',
'http://www.site.com:8008'
]
for url in urls:
    print(p.match(url) is not None, end=' ')
print(p.match(url))
| [
"[email protected]"
] | |
0d54a8ca3d2b786f788a93b14b7817b06777b682 | b48764e6684ffbd73b0043dc889c013860642e8d | /1ํ๊ธฐ/area1.py | f237fe223ef10a6ce0fda088efe636035c0f451d | [] | no_license | tanghee/Programming-Python- | c6d32a1e49d5c95c8359aeb8775cb52cc665167a | eb402357ad31638d867042e76af507bc6c67a0b4 | refs/heads/master | 2022-03-27T07:27:18.888660 | 2019-12-10T02:06:41 | 2019-12-10T02:06:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 180 | py | width = 3
height = 5
print("Area of a triangle with width", width, "and height", height, ":", width*height/2)
print("Area of a triangle with width", width, "and height", height, ":", width*height/2) | [
"[email protected]"
] |