Dataset schema (column dtypes and observed statistics):

| column | dtype | observed range / classes |
|---|---|---|
| blob_id | string | length 40 to 40 |
| directory_id | string | length 40 to 40 |
| path | string | length 3 to 616 |
| content_id | string | length 40 to 40 |
| detected_licenses | list | length 0 to 112 |
| license_type | string | 2 classes |
| repo_name | string | length 5 to 115 |
| snapshot_id | string | length 40 to 40 |
| revision_id | string | length 40 to 40 |
| branch_name | string | 777 classes |
| visit_date | timestamp[us] | 2015-08-06 10:31:46 to 2023-09-06 10:44:38 |
| revision_date | timestamp[us] | 1970-01-01 02:38:32 to 2037-05-03 13:00:00 |
| committer_date | timestamp[us] | 1970-01-01 02:38:32 to 2023-09-06 01:08:06 |
| github_id | int64, nullable (⌀) | 4.92k to 681M |
| star_events_count | int64 | 0 to 209k |
| fork_events_count | int64 | 0 to 110k |
| gha_license_id | string | 22 classes |
| gha_event_created_at | timestamp[us], nullable (⌀) | 2012-06-04 01:52:49 to 2023-09-14 21:59:50 |
| gha_created_at | timestamp[us], nullable (⌀) | 2008-05-22 07:58:19 to 2023-08-21 12:35:19 |
| gha_language | string | 149 classes |
| src_encoding | string | 26 classes |
| language | string | 1 class |
| is_vendor | bool | 2 classes |
| is_generated | bool | 2 classes |
| length_bytes | int64 | 3 to 10.2M |
| extension | string | 188 classes |
| content | string | length 3 to 10.2M |
| authors | list | length 1 to 1 |
| author_id | string | length 1 to 132 |

Each record below follows this column order, with the multi-line `content` field printed inline.
c21d74a662d5db8b34c6793c5b0def3026ab0cfe | 8afb5afd38548c631f6f9536846039ef6cb297b9 | /_MY_ORGS/Web-Dev-Collaborative/blog-research/ciriculumn/week.18-/W18D2_lectures/08-posts/app/routes.py | 91d6b5ace00c8f67a076bc546d9f4e510c7630de | [
"MIT"
] | permissive | bgoonz/UsefulResourceRepo2.0 | d87588ffd668bb498f7787b896cc7b20d83ce0ad | 2cb4b45dd14a230aa0e800042e893f8dfb23beda | refs/heads/master | 2023-03-17T01:22:05.254751 | 2022-08-11T03:18:22 | 2022-08-11T03:18:22 | 382,628,698 | 10 | 12 | MIT | 2022-10-10T14:13:54 | 2021-07-03T13:58:52 | null | UTF-8 | Python | false | false | 809 | py | from flask import render_template, redirect
from app import app
from app.forms.login import LoginForm
@app.route('/')
def index():
return render_template('page.html', title='Welcome')
@app.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
return redirect('/')
return render_template('login.html', form=form)
@app.route('/help')
def help():
return render_template('page.html', title='Help')
@app.route('/item/<int:id>')
def item(id):
if (id > 0 and id < 100):
item = {
"id": id,
"name": f"Fancy Item {id}",
"description": "Coming soon!",
}
return render_template('item.html', item=item)
else:
return '<h1>Sample App</h1><h2>Item Not Found</h2>'
| [
"[email protected]"
] | |
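
A quick way to exercise the routes in `app/routes.py` above is Flask's built-in test client. The sketch below is illustrative only: it assumes the lecture's `app` package (the Flask instance and the templates referenced by `render_template`) is importable, which is not shown in this record.

```python
# Hypothetical smoke test for the routes above.
from app import app  # the lecture's Flask instance (assumed importable)

client = app.test_client()
assert client.get("/").status_code == 200            # index page
assert client.get("/item/5").status_code == 200      # id inside (0, 100)
# ids outside the range fall through to the literal HTML response
assert b"Item Not Found" in client.get("/item/500").data
```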
a0c82a506c91a3a7c0b678adac1283adedd35094 | 6bd047eb1951601a5a7bab564eb2abba92c6c004 | /prices/api/queries.py | 0fdac2905d72924ea823efd6ca273a290b653fd8 | [] | no_license | volgoweb/DDD_sandbox | 6ab2b43d3fcad8eb2f802bd485e5dbc05eb2e10d | 700c2848d5341ab267e69326bac2487657450d22 | refs/heads/master | 2021-01-01T15:46:13.244679 | 2017-07-11T06:18:36 | 2017-07-11T06:18:36 | 97,695,978 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | from utils.queries import IQuery
class GetProductPricingForOneProduct(IQuery):
def __init__(self, product_id: int):
self.product_id = product_id
@classmethod
def get_query_type_name(cls):
return 'prices.GetProductPricingForOneProduct'
class GetProductPricingForManyProducts(IQuery):
    def __init__(self, product_ids: list):
        # plural query: takes a list of ids (the original repeated the
        # single-product signature, which reads like a copy-paste slip)
        self.product_ids = product_ids
@classmethod
def get_query_type_name(cls):
return 'prices.GetProductPricingForManyProducts'
class GetProductPricingForAllProducts(IQuery):
@classmethod
def get_query_type_name(cls):
return 'prices.GetProductPricingForAllProducts'
| [
"[email protected]"
] | |
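
These classes are plain query objects; `get_query_type_name` supplies the key that some dispatcher presumably routes on. The project's actual `utils.queries` bus is not part of this record, so the sketch below is a hypothetical minimal dispatcher showing how that key could be used with the classes above.

```python
# Hypothetical query bus keyed on get_query_type_name().
class QueryBus:
    def __init__(self):
        self._handlers = {}

    def register(self, query_cls, handler):
        self._handlers[query_cls.get_query_type_name()] = handler

    def dispatch(self, query):
        return self._handlers[query.get_query_type_name()](query)


bus = QueryBus()
bus.register(GetProductPricingForOneProduct,
             lambda q: {"product_id": q.product_id, "price": 9.99})
print(bus.dispatch(GetProductPricingForOneProduct(product_id=42)))
```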
54ab05db85f18373b1cd489a5310e729a167c100 | 08e039046e2b3c526b5fd2169e02d5c5bbe253c5 | /0x04-python-more_data_structures/0-main.py | f37cdb42818fedc7a8af51018ae298fef65412fb | [] | no_license | VinneyJ/alx-higher_level_programming | 22a976a22583334aff1f0c4120fb81117905e35b | 0ea8719ec5f28c76faf06bb5e67c14abb71fa3d0 | refs/heads/main | 2023-07-31T15:44:30.390103 | 2021-10-01T21:27:31 | 2021-10-01T21:27:31 | 361,816,988 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 225 | py | #!/usr/bin/python3
square_matrix_simple = __import__('0-square_matrix_simple').square_matrix_simple
matrix = [
[1, 2, 3],
[4, 5, 6],
[7, 8, 9]
]
new_matrix = square_matrix_simple(matrix)
print(new_matrix)
print(matrix)
| [
"[email protected]"
] | |
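
The driver above imports `0-square_matrix_simple`, which is not included in this record. A minimal implementation consistent with the driver (square every value, return a new matrix, leave the input untouched) would look like the sketch below; the real exercise solution may differ.

```python
def square_matrix_simple(matrix=[]):
    """Return a new matrix with every value squared (hypothetical solution)."""
    return [[x ** 2 for x in row] for row in matrix]
```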
bb3fb9eb21687447b680c05b829baec91643160f | b76ae361ab277923d0fed969b795074a1ecb400b | /project/RealDjango/venv/Scripts/django-admin.py | cfd7845dd7575e87f8a033bd3ad032b3f72ce470 | [] | no_license | RobotNo42/old_coed | 995df921e31d5a9b65f1609380235330edb546ad | 59f82e5d58965dd5c6340f4daf4ef43d1d311252 | refs/heads/master | 2021-07-18T00:07:33.450173 | 2020-06-16T13:51:11 | 2020-06-16T13:51:11 | 180,384,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 161 | py | #!D:\python\project\RealDjango\venv\Scripts\python.exe
from django.core import management
if __name__ == "__main__":
management.execute_from_command_line()
| [
"[email protected]"
] | |
eb439ef5321ace82cb57ceda8a14ba4f5978f5b8 | 52508ce70294ec29f84c9e551d8b92a7b402913d | /anyflow/flow.py | 647b6d25ed535d940110f411ddcc5ba175e061f2 | [
"MIT"
] | permissive | Cologler/anyflow-python | 0f18003a39cb645c24aa5549df4ee39173977fff | cde20b0c74faf18cb7dc503072d4c2f99d5681de | refs/heads/master | 2021-07-30T15:12:19.478682 | 2021-07-29T14:08:22 | 2021-07-29T14:08:22 | 241,824,800 | 0 | 0 | MIT | 2021-07-29T14:08:22 | 2020-02-20T07:53:25 | Python | UTF-8 | Python | false | false | 2,957 | py | # -*- coding: utf-8 -*-
#
# Copyright (c) 2020~2999 - Cologler <[email protected]>
# ----------
#
# ----------
from typing import Callable, Any, List
from abc import ABC, abstractmethod
from .err import Abort
from .ctx import FlowContext
#Next = Callable[[], Any]
class MiddlewareInvoker:
__slots__ = ('_ctx', '_factorys')
def __init__(self, factorys: list, ctx: FlowContext):
super().__init__()
self._factorys = factorys
self._ctx = ctx
def invoke(self) -> Any:
if self._factorys:
return self.run_middleware(0)
def run_middleware(self, index) -> Any:
factory = self._factorys[index]
middleware = factory(self._ctx)
next = Next(self, index+1)
return middleware(self._ctx, next)
def has_next(self, next_index: int):
'return whether has the next middleware.'
return len(self._factorys) > next_index
class Next:
__slots__ = ('_invoker', '_next_index', '_retvals')
def __init__(self, invoker: MiddlewareInvoker, next_index: int):
super().__init__()
self._invoker = invoker
self._next_index = next_index
self._retvals = None
def __call__(self, or_value=None):
if not self._retvals:
if self._invoker.has_next(self._next_index):
rv = self._invoker.run_middleware(self._next_index)
else:
rv = or_value
self._retvals = (rv, )
return self._retvals[0]
@property
def is_nop(self):
return not self._invoker.has_next(self._next_index)
Middleware = Callable[[FlowContext, Next], Any]
MiddlewareFactory = Callable[[FlowContext], Middleware]
class Flow:
def __init__(self, *, ctx_cls=FlowContext, state: dict=None):
super().__init__()
if not issubclass(ctx_cls, FlowContext):
raise TypeError(f'excepted subclass of FlowContext, got {ctx_cls}')
self._ctx_cls = ctx_cls
self._factorys = []
self.suppress_abort = False
self._state = dict(state or ()) # make a clone
def run(self, state: dict=None):
ctx_state = self._state.copy()
ctx_state.update(state or ())
ctx = self._ctx_cls(ctx_state)
invoker = MiddlewareInvoker(self._factorys.copy(), ctx)
try:
return invoker.invoke()
except Abort:
if not self.suppress_abort:
raise
def use(self, middleware: Middleware=None):
'''
*this method can use as decorator.*
'''
if middleware is None:
return lambda m: self.use(m)
return self.use_factory(lambda _: middleware)
def use_factory(self, middleware_factory: MiddlewareFactory=None):
'''
*this method can use as decorator.*
'''
if middleware_factory is None:
return lambda mf: self.use_factory(mf)
self._factorys.append(middleware_factory)
| [
"[email protected]"
] | |
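
A short illustration of the middleware chain above, assuming the sibling modules (`anyflow.err.Abort`, `anyflow.ctx.FlowContext`) import cleanly; it is not taken from the repository. Each middleware receives the context and a `Next` callable; calling `next()` runs the rest of the chain, and `or_value` is what comes back when nothing is left to run.

```python
flow = Flow()
flow.use(lambda ctx, nxt: "outer(" + str(nxt(or_value="default")) + ")")
flow.use(lambda ctx, nxt: "inner")   # terminal middleware: never calls nxt

print(flow.run())                    # -> "outer(inner)"
```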
bec64a7169611c133f6effb658d194136f903149 | feff273063b4c89bde3aa190b4e49c83ab1e5855 | /memphis/view/layout.py | 6a21cc666494b2107e8bc16e66c706a54a3bb83b | [
"BSD-2-Clause",
"BSD-3-Clause"
] | permissive | mcdonc/memphis | 7d53b8f77f7bab7c20a258a9ab33d1cc663711a2 | daef09507eacb32b235faf070a0146ffb5cf035f | refs/heads/master | 2016-09-05T23:04:01.578991 | 2011-10-11T03:37:43 | 2011-10-11T03:37:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,147 | py | """ layout implementation """
import sys, logging
from zope import interface
from pyramid.interfaces import IRequest, IRouteRequest
from memphis import config
from memphis.view.base import View
from memphis.view.formatter import format
from memphis.view.interfaces import ILayout
from memphis.view.customize import LayerWrapper
log = logging.getLogger('memphis.view')
def queryLayout(request, context, name=''):
""" query named layout for context """
while context is not None:
layout = config.registry.queryMultiAdapter(
(context, request), ILayout, name)
if layout is not None:
return layout
context = getattr(context, '__parent__', None)
return None
class Layout(View):
interface.implements(ILayout)
name = ''
template = None
view = None
viewcontext = None
@property
def __name__(self):
return self.name
def render(self, content, **kwargs):
if self.template is None:
return content
kwargs.update({'view': self,
'content': content,
'context': self.context,
'request': self.request,
'format': format})
return self.template(**kwargs)
def __call__(self, content, layout=None, view=None):
if view is not None:
self.view = view
self.viewcontext = getattr(view, 'context', self.context)
if layout is not None:
self.view = layout.view or self.view
self.viewcontext = layout.viewcontext or self.viewcontext
result = self.render(content, **(self.update() or {}))
if self.layout is None:
return result
parent = getattr(view, '__parent__', self.context)
if self.name != self.layout:
layout = queryLayout(self.request, parent, self.layout)
if layout is not None:
return layout(result, layout=self, view=view)
else:
if layout is not None:
context = layout.context
else:
context = self.context
parent = getattr(context, '__parent__', None)
if parent is not None:
layout = queryLayout(self.request, parent, self.layout)
if layout is not None:
return layout(result, view=view)
log.warning("Can't find parent layout: '%s'"%self.layout)
return self.render(result)
def registerLayout(
name='', context=None, parent='',
klass=Layout, template = None, route=None, layer=''):
if not klass or not issubclass(klass, Layout):
raise ValueError("klass has to inherit from Layout class")
discriminator = ('memphis.view:layout', name, context, route, layer)
info = config.DirectiveInfo()
info.attach(
config.Action(
LayerWrapper(registerLayoutImpl, discriminator),
(klass, name, context, template, parent, route),
discriminator = discriminator)
)
def registerLayoutImpl(klass, name, context, template, parent, route_name):
if klass in _registered:
raise ValueError("Class can't be reused for different layouts")
if not parent:
layout = None
elif parent == '.':
layout = ''
else:
layout = parent
# class attributes
cdict = {'name': name,
'layout': layout}
if template is not None:
cdict['template'] = template
if issubclass(klass, Layout) and klass is not Layout:
layout_class = klass
_registered.append(klass)
for attr, value in cdict.items():
setattr(layout_class, attr, value)
else:
layout_class = type(str('Layout<%s>'%name), (Layout,), cdict)
# register layout
request_iface = IRequest
if route_name is not None:
request_iface = config.registry.getUtility(IRouteRequest,name=route_name)
config.registry.registerAdapter(
layout_class, (context, request_iface), ILayout, name)
_registered = []
@config.addCleanup
def cleanUp():
_registered[:] = []
| [
"[email protected]"
] | |
d16ef364cf6106a88a1b28a9a96bdae89166f80c | 778a3e1e70b0b2782d2a35f8818bbe799e6c7396 | /Seventh_week_exersice/03Sum_prime_non_prime.py | def7d6757eba88c49c90e3f84af9077c2e5e6b72 | [] | no_license | skafev/Python_basics | 0088203207fe3960b26944e0940acaec40a8caaf | 8bfc1a8b0dad3bf829fffbd539cebe3688f75974 | refs/heads/main | 2023-06-10T11:25:27.468914 | 2021-07-01T15:28:12 | 2021-07-01T15:28:12 | 382,078,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 447 | py | number = input()
not_prime = 0
prime = 0
while number != "stop":
    number = int(number)
    if number < 0:
        print("Number is negative.")
    else:
        # trial division up to sqrt(n); 0 and 1 are not prime
        # (the original only tested divisibility by 2 and 3, which
        # misclassifies numbers like 25 and 49 as prime)
        is_prime = number > 1
        for divisor in range(2, int(number ** 0.5) + 1):
            if number % divisor == 0:
                is_prime = False
                break
        if is_prime:
            prime += number
        else:
            not_prime += number
    number = input()
print(f"Sum of all prime numbers is: {prime}")
print(f"Sum of all non prime numbers is: {not_prime}") | [
"[email protected]"
] | |
2c8269a6d5c4ddb8c4b445466174f74aecf370f6 | 857b051f99e8a42f94dd5895c7ac735e37867e94 | /hakkimizda/urls.py | 56836439053617eeb3e0ba317c7a1333be1e19df | [
"MIT"
] | permissive | kopuskopecik/projem | a88e4970ef23a4917e590e1a0a19ac7c49c86a73 | 738b0eeb2bf407b4ef54197cce1ce26ea67279c8 | refs/heads/master | 2021-06-22T10:04:44.523681 | 2020-12-25T19:56:10 | 2020-12-25T19:56:10 | 172,302,265 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 171 | py | from django.urls import path
from django.conf.urls import url
from .views import *
app_name="hakkimizda"
urlpatterns = [
path('hakkimizda/', hakkimizda, name="hak"),
] | [
"[email protected]"
] | |
1d6af3af9fa41162b76ba04790d68e5e149b3219 | 2ca07aecfa6ff25b0baae6dc9a707a284c2d1b6d | /trustzone_images/apps/bsp/build/scripts/genuses.py | 44dde7dfd37d5c023ae14fb6e8d49ccd9fafb72d | [] | no_license | zhilangtaosha/msm8996-wp-1-0_test_device | ef05af263ba7955263ff91eb81d45b2437bc492e | 6af9b44abbc4a367a9aaae26707079974c535f08 | refs/heads/master | 2023-03-19T02:42:09.581740 | 2021-02-21T01:20:19 | 2021-02-21T01:20:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,954 | py | #===============================================================================
#
# genuses
#
# GENERAL DESCRIPTION
# Generates USES_FLAGS imformation from DATA file generate from build/ms.
#
# Copyright (c) 2009-2010 by Qualcomm Technologies, Incorporated.
# All Rights Reserved.
# QUALCOMM Proprietary/GTDR
#
#-------------------------------------------------------------------------------
#
# $Header: //components/rel/apps.tz/1.0.6/bsp/build/scripts/genuses.py#1 $
# $DateTime: 2016/12/02 01:50:16 $
# $Author: pwbldsvc $
# $Change: 11897059 $
# EDIT HISTORY FOR FILE
#
# This section contains comments describing changes made to the module.
# Notice that changes are listed in reverse chronological order.
#
# when who what, where, why
# -------- --- ---------------------------------------------------------
# 04/02/10 sk Created
#
#===============================================================================
import os
import subprocess
import string
import sys
import re, string, os
from array import array
from optparse import OptionParser
from datetime import datetime
#===============================================================================
# parse_args
# parse command line arguments
#===============================================================================
def parse_args():
usage = "usage: %prog [options]"
version = "%prog 1.0"
parser = OptionParser(usage=usage, version=version)
parser.add_option("-f", "--datfile", dest="dat_filename",
help="Read preprocess data from FILE", metavar="FILE")
parser.add_option("-o", "--outfile", dest="output_filename",
help="Write output to FILE", metavar="FILE")
parser.add_option("-v", "--verbose",
action="store_true", dest="verbose", default=False,
help="print status messages to stdout")
(options, args) = parser.parse_args()
if options.dat_filename is None:
parser.error("--datfile option must be defined")
sys.exit(2)
if options.output_filename is None:
parser.error("--outfile option must be defined")
sys.exit(2)
return (options, args)
#===============================================================================
# create_file_banner
# creates a string that can be use as a banner for auto generated files
#===============================================================================
def create_file_banner(fname, description="None", start_comment="#",
end_comment="", start_block="", end_block="", style='none'):
banner_str = \
'''$SB$SCM============================================================================$ECM
$SCM Name: $ECM
$SCM $FILE_NAME $ECM
$SCM
$SCM Description: $ECM
$SCM $DESCRIPTION $ECM
$SCM $ECM
$SCM Copyright (c) $YEAR by QUALCOMM, Incorporated. All Rights Reserved. $ECM
$SCM============================================================================$ECM
$SCM $ECM
$SCM *** AUTO GENERATED FILE - DO NOT EDIT $ECM
$SCM $ECM
$SCM GENERATED: $DATE $ECM
$SCM============================================================================$ECM$EB
'''
if style == 'C':
start_comment = "#"
end_comment = ""
start_block = "/*\n"
end_block = "\n*/"
elif style == 'C++':
start_comment = "//"
end_comment = ""
start_block = ""
end_block = ""
elif style == 'asm':
start_comment = ";"
end_comment = ""
start_block = ""
end_block = ""
elif style == 'make' or style == 'shell':
start_comment = "#"
end_comment = ""
start_block = ""
end_block = ""
elif style == 'dos':
start_comment = "REM "
end_comment = ""
start_block = ""
end_block = ""
banner_str = banner_str.replace('$SCM', start_comment)
banner_str = banner_str.replace('$ECM', end_comment)
banner_str = banner_str.replace('$SB', start_block)
banner_str = banner_str.replace('$EB', end_block)
banner_str = banner_str.replace('$YEAR', str(datetime.now().strftime('%Y')))
banner_str = banner_str.replace('$DATE', str(datetime.now().ctime()))
banner_str = banner_str.replace('$FILE_NAME', fname)
banner_str = banner_str.replace('$DESCRIPTION', description)
return banner_str
def CleanLine(aLine):
aLine = aLine.replace('(','{')
aLine = aLine.replace(')','}')
aLine = aLine.replace('\n','')
aLine = aLine.replace(':=','=')
aLine = aLine.replace('?=','=')
return aLine
def CleanVarName(aVarname):
aVarname = aVarname.replace('.', '_')
aVarname = aVarname.replace('export', '')
aVarname = aVarname.replace('define', '')
aVarname = re.sub('\s', '', aVarname) #get rid of whitespaces
return aVarname
def CleanVarValue(aVarvalue):
aVarvalue = aVarvalue.strip()
return aVarvalue
def WriteData (options, file_handle, data, new_line="\n"):
file_handle.write(data + new_line)
if options.verbose:
print data
def main():
# get args from cmd line
(options, args) = parse_args()
uses = "USES"
lines = open(options.dat_filename, 'r').readlines()
total = ""
banner = create_file_banner(os.path.split(options.output_filename)[1])
out_file = open(options.output_filename, 'w')
WriteData(options, out_file, banner, new_line="")
WriteData(options, out_file, "def exists(env):")
WriteData(options, out_file, " return env.Detect('usesflags')")
WriteData(options, out_file, "")
WriteData(options, out_file, "def generate(env):")
VarNameDict = {}
#count = 0
for line in lines:
line = line.lstrip()
if line.find(uses, 0, 4)>-1:
line = CleanLine(line)
tempstr = line.split("=")
VarName = tempstr[0]
VarName = CleanVarName(VarName)
VarValue = tempstr[1]
VarValue = CleanVarValue(VarValue)
if VarValue == "yes":
vUsesFlag = True
else:
vUsesFlag = False
if vUsesFlag == True:
VarNameDict[VarName] = True
# sort keys and write file
#import pdb; pdb.set_trace()
uses_flags = sorted(VarNameDict.iterkeys())
for uflag in uses_flags:
WriteData(options, out_file, " env.Replace(%s = True)" % uflag)
WriteData(options, out_file, " env.Replace(USES_FLAGS = %s)" % str(uses_flags))
WriteData(options, out_file, " return None")
out_file.close()
#run
main() | [
"[email protected]"
] | |
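
A hypothetical round trip clarifies what the script emits. Given a DATA file containing lines such as `export USES_FEATURE_A := yes` and `USES_FEATURE_B = no`, running `genuses.py -f build.dat -o usesflags.py` would produce roughly the following (banner omitted; only flags set to `yes` survive):

```python
def exists(env):
    return env.Detect('usesflags')

def generate(env):
    env.Replace(USES_FEATURE_A = True)
    env.Replace(USES_FLAGS = ['USES_FEATURE_A'])
    return None
```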
c3973679874c6bcb06a9d97d54f6965242f7ef53 | 15f321878face2af9317363c5f6de1e5ddd9b749 | /solutions_python/Problem_143/388.py | 2c19f330a95780da7bb74471727f3b492214bb28 | [] | no_license | dr-dos-ok/Code_Jam_Webscraper | c06fd59870842664cd79c41eb460a09553e1c80a | 26a35bf114a3aa30fc4c677ef069d95f41665cc0 | refs/heads/master | 2020-04-06T08:17:40.938460 | 2018-10-14T10:12:47 | 2018-10-14T10:12:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | import math
def solve(a, b, k):
count = 0
i = 0
while i < a:
j = 0
while j < b:
if (i & j) < k:
count += 1
j += 1
i += 1
return count
name = "B-small-attempt0"
fi = open(name + ".in", "r")
fout = open(name + ".out", "w")
numTestCases = int(fi.readline())
print "#TestCases: ", numTestCases
for i in range(0, numTestCases):
line = fi.readline().strip().split(" ")
a = int(line[0])
b = int(line[1])
k = int(line[2])
fout.write("Case #" + str(i + 1) + ": " + str(solve(a, b, k)) + "\n")
#print "Case #" + str(i + 1) + ": " + str(solve(a, b, k))
fi.close()
fout.close() | [
"[email protected]"
] | |
f27cadf13a59eadb627295a3c642e84f8e57ccb1 | 3c000380cbb7e8deb6abf9c6f3e29e8e89784830 | /venv/Lib/site-packages/cobra/modelimpl/cloud/hostrouteringressbytes15min.py | cc28259373b4b2613646e12c80ac99530e16d402 | [] | no_license | bkhoward/aciDOM | 91b0406f00da7aac413a81c8db2129b4bfc5497b | f2674456ecb19cf7299ef0c5a0887560b8b315d0 | refs/heads/master | 2023-03-27T23:37:02.836904 | 2021-03-26T22:07:54 | 2021-03-26T22:07:54 | 351,855,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,198 | py | # coding=UTF-8
# **********************************************************************
# Copyright (c) 2013-2020 Cisco Systems, Inc. All rights reserved
# written by zen warriors, do not modify!
# **********************************************************************
from cobra.mit.meta import ClassMeta
from cobra.mit.meta import StatsClassMeta
from cobra.mit.meta import CounterMeta
from cobra.mit.meta import PropMeta
from cobra.mit.meta import Category
from cobra.mit.meta import SourceRelationMeta
from cobra.mit.meta import NamedSourceRelationMeta
from cobra.mit.meta import TargetRelationMeta
from cobra.mit.meta import DeploymentPathMeta, DeploymentCategory
from cobra.model.category import MoCategory, PropCategory, CounterCategory
from cobra.mit.mo import Mo
# ##################################################
class HostRouterIngressBytes15min(Mo):
"""
Mo doc not defined in techpub!!!
"""
meta = StatsClassMeta("cobra.model.cloud.HostRouterIngressBytes15min", "host router cloud ingress bytess")
counter = CounterMeta("unicast", CounterCategory.COUNTER, "bytes", "host router ingress unicast bytes")
counter._propRefs[PropCategory.IMPLICIT_LASTREADING] = "unicastLast"
counter._propRefs[PropCategory.IMPLICIT_CUMULATIVE] = "unicastCum"
counter._propRefs[PropCategory.IMPLICIT_PERIODIC] = "unicastPer"
counter._propRefs[PropCategory.IMPLICIT_MIN] = "unicastMin"
counter._propRefs[PropCategory.IMPLICIT_MAX] = "unicastMax"
counter._propRefs[PropCategory.IMPLICIT_AVG] = "unicastAvg"
counter._propRefs[PropCategory.IMPLICIT_SUSPECT] = "unicastSpct"
counter._propRefs[PropCategory.IMPLICIT_BASELINE] = "unicastBase"
counter._propRefs[PropCategory.IMPLICIT_THRESHOLDED] = "unicastThr"
counter._propRefs[PropCategory.IMPLICIT_TREND_BASE] = "unicastTrBase"
counter._propRefs[PropCategory.IMPLICIT_TREND] = "unicastTr"
counter._propRefs[PropCategory.IMPLICIT_RATE] = "unicastRate"
meta._counters.append(counter)
meta.moClassName = "cloudHostRouterIngressBytes15min"
meta.rnFormat = "CDcloudHostRouterIngressBytes15min"
meta.category = MoCategory.STATS_CURRENT
meta.label = "current host router cloud ingress bytess stats in 15 minute"
meta.writeAccessMask = 0x601
meta.readAccessMask = 0x601
meta.isDomainable = False
meta.isReadOnly = True
meta.isConfigurable = False
meta.isDeletable = False
meta.isContextRoot = False
meta.parentClasses.add("cobra.model.cloud.HostRouterTunnelInfoHolder")
meta.superClasses.add("cobra.model.stats.Item")
meta.superClasses.add("cobra.model.stats.Curr")
meta.superClasses.add("cobra.model.cloud.HostRouterIngressBytes")
meta.rnPrefixes = [
('CDcloudHostRouterIngressBytes15min', False),
]
prop = PropMeta("str", "childAction", "childAction", 4, PropCategory.CHILD_ACTION)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("deleteAll", "deleteall", 16384)
prop._addConstant("deleteNonPresent", "deletenonpresent", 8192)
prop._addConstant("ignore", "ignore", 4096)
meta.props.add("childAction", prop)
prop = PropMeta("str", "cnt", "cnt", 16212, PropCategory.REGULAR)
prop.label = "Number of Collections During this Interval"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("cnt", prop)
prop = PropMeta("str", "dn", "dn", 1, PropCategory.DN)
prop.label = "None"
prop.isDn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("dn", prop)
prop = PropMeta("str", "lastCollOffset", "lastCollOffset", 111, PropCategory.REGULAR)
prop.label = "Collection Length"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("lastCollOffset", prop)
prop = PropMeta("str", "modTs", "modTs", 7, PropCategory.REGULAR)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop.defaultValue = 0
prop.defaultValueStr = "never"
prop._addConstant("never", "never", 0)
meta.props.add("modTs", prop)
prop = PropMeta("str", "repIntvEnd", "repIntvEnd", 110, PropCategory.REGULAR)
prop.label = "Reporting End Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvEnd", prop)
prop = PropMeta("str", "repIntvStart", "repIntvStart", 109, PropCategory.REGULAR)
prop.label = "Reporting Start Time"
prop.isImplicit = True
prop.isAdmin = True
meta.props.add("repIntvStart", prop)
prop = PropMeta("str", "rn", "rn", 2, PropCategory.RN)
prop.label = "None"
prop.isRn = True
prop.isImplicit = True
prop.isAdmin = True
prop.isCreateOnly = True
meta.props.add("rn", prop)
prop = PropMeta("str", "status", "status", 3, PropCategory.STATUS)
prop.label = "None"
prop.isImplicit = True
prop.isAdmin = True
prop._addConstant("created", "created", 2)
prop._addConstant("deleted", "deleted", 8)
prop._addConstant("modified", "modified", 4)
meta.props.add("status", prop)
prop = PropMeta("str", "unicastAvg", "unicastAvg", 54328, PropCategory.IMPLICIT_AVG)
prop.label = "host router ingress unicast bytes average value"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastAvg", prop)
prop = PropMeta("str", "unicastBase", "unicastBase", 54323, PropCategory.IMPLICIT_BASELINE)
prop.label = "host router ingress unicast bytes baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastBase", prop)
prop = PropMeta("str", "unicastCum", "unicastCum", 54324, PropCategory.IMPLICIT_CUMULATIVE)
prop.label = "host router ingress unicast bytes cumulative"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastCum", prop)
prop = PropMeta("str", "unicastLast", "unicastLast", 54322, PropCategory.IMPLICIT_LASTREADING)
prop.label = "host router ingress unicast bytes current value"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastLast", prop)
prop = PropMeta("str", "unicastMax", "unicastMax", 54327, PropCategory.IMPLICIT_MAX)
prop.label = "host router ingress unicast bytes maximum value"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastMax", prop)
prop = PropMeta("str", "unicastMin", "unicastMin", 54326, PropCategory.IMPLICIT_MIN)
prop.label = "host router ingress unicast bytes minimum value"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastMin", prop)
prop = PropMeta("str", "unicastPer", "unicastPer", 54325, PropCategory.IMPLICIT_PERIODIC)
prop.label = "host router ingress unicast bytes periodic"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastPer", prop)
prop = PropMeta("str", "unicastRate", "unicastRate", 54333, PropCategory.IMPLICIT_RATE)
prop.label = "host router ingress unicast bytes rate"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastRate", prop)
prop = PropMeta("str", "unicastSpct", "unicastSpct", 54329, PropCategory.IMPLICIT_SUSPECT)
prop.label = "host router ingress unicast bytes suspect count"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastSpct", prop)
prop = PropMeta("str", "unicastThr", "unicastThr", 54330, PropCategory.IMPLICIT_THRESHOLDED)
prop.label = "host router ingress unicast bytes thresholded flags"
prop.isOper = True
prop.isStats = True
prop.defaultValue = 0
prop.defaultValueStr = "unspecified"
prop._addConstant("avgCrit", "avg-severity-critical", 2199023255552)
prop._addConstant("avgHigh", "avg-crossed-high-threshold", 68719476736)
prop._addConstant("avgLow", "avg-crossed-low-threshold", 137438953472)
prop._addConstant("avgMajor", "avg-severity-major", 1099511627776)
prop._addConstant("avgMinor", "avg-severity-minor", 549755813888)
prop._addConstant("avgRecovering", "avg-recovering", 34359738368)
prop._addConstant("avgWarn", "avg-severity-warning", 274877906944)
prop._addConstant("cumulativeCrit", "cumulative-severity-critical", 8192)
prop._addConstant("cumulativeHigh", "cumulative-crossed-high-threshold", 256)
prop._addConstant("cumulativeLow", "cumulative-crossed-low-threshold", 512)
prop._addConstant("cumulativeMajor", "cumulative-severity-major", 4096)
prop._addConstant("cumulativeMinor", "cumulative-severity-minor", 2048)
prop._addConstant("cumulativeRecovering", "cumulative-recovering", 128)
prop._addConstant("cumulativeWarn", "cumulative-severity-warning", 1024)
prop._addConstant("lastReadingCrit", "lastreading-severity-critical", 64)
prop._addConstant("lastReadingHigh", "lastreading-crossed-high-threshold", 2)
prop._addConstant("lastReadingLow", "lastreading-crossed-low-threshold", 4)
prop._addConstant("lastReadingMajor", "lastreading-severity-major", 32)
prop._addConstant("lastReadingMinor", "lastreading-severity-minor", 16)
prop._addConstant("lastReadingRecovering", "lastreading-recovering", 1)
prop._addConstant("lastReadingWarn", "lastreading-severity-warning", 8)
prop._addConstant("maxCrit", "max-severity-critical", 17179869184)
prop._addConstant("maxHigh", "max-crossed-high-threshold", 536870912)
prop._addConstant("maxLow", "max-crossed-low-threshold", 1073741824)
prop._addConstant("maxMajor", "max-severity-major", 8589934592)
prop._addConstant("maxMinor", "max-severity-minor", 4294967296)
prop._addConstant("maxRecovering", "max-recovering", 268435456)
prop._addConstant("maxWarn", "max-severity-warning", 2147483648)
prop._addConstant("minCrit", "min-severity-critical", 134217728)
prop._addConstant("minHigh", "min-crossed-high-threshold", 4194304)
prop._addConstant("minLow", "min-crossed-low-threshold", 8388608)
prop._addConstant("minMajor", "min-severity-major", 67108864)
prop._addConstant("minMinor", "min-severity-minor", 33554432)
prop._addConstant("minRecovering", "min-recovering", 2097152)
prop._addConstant("minWarn", "min-severity-warning", 16777216)
prop._addConstant("periodicCrit", "periodic-severity-critical", 1048576)
prop._addConstant("periodicHigh", "periodic-crossed-high-threshold", 32768)
prop._addConstant("periodicLow", "periodic-crossed-low-threshold", 65536)
prop._addConstant("periodicMajor", "periodic-severity-major", 524288)
prop._addConstant("periodicMinor", "periodic-severity-minor", 262144)
prop._addConstant("periodicRecovering", "periodic-recovering", 16384)
prop._addConstant("periodicWarn", "periodic-severity-warning", 131072)
prop._addConstant("rateCrit", "rate-severity-critical", 36028797018963968)
prop._addConstant("rateHigh", "rate-crossed-high-threshold", 1125899906842624)
prop._addConstant("rateLow", "rate-crossed-low-threshold", 2251799813685248)
prop._addConstant("rateMajor", "rate-severity-major", 18014398509481984)
prop._addConstant("rateMinor", "rate-severity-minor", 9007199254740992)
prop._addConstant("rateRecovering", "rate-recovering", 562949953421312)
prop._addConstant("rateWarn", "rate-severity-warning", 4503599627370496)
prop._addConstant("trendCrit", "trend-severity-critical", 281474976710656)
prop._addConstant("trendHigh", "trend-crossed-high-threshold", 8796093022208)
prop._addConstant("trendLow", "trend-crossed-low-threshold", 17592186044416)
prop._addConstant("trendMajor", "trend-severity-major", 140737488355328)
prop._addConstant("trendMinor", "trend-severity-minor", 70368744177664)
prop._addConstant("trendRecovering", "trend-recovering", 4398046511104)
prop._addConstant("trendWarn", "trend-severity-warning", 35184372088832)
prop._addConstant("unspecified", None, 0)
meta.props.add("unicastThr", prop)
prop = PropMeta("str", "unicastTr", "unicastTr", 54332, PropCategory.IMPLICIT_TREND)
prop.label = "host router ingress unicast bytes trend"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastTr", prop)
prop = PropMeta("str", "unicastTrBase", "unicastTrBase", 54331, PropCategory.IMPLICIT_TREND_BASE)
prop.label = "host router ingress unicast bytes trend baseline"
prop.isOper = True
prop.isStats = True
meta.props.add("unicastTrBase", prop)
# Deployment Meta
meta.deploymentQuery = True
meta.deploymentType = "Ancestor"
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudIgw", "From fv:Ctx to hcloud:Igw", "cobra.model.hcloud.Igw"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudVgw", "From fv:Ctx to hcloud:Vgw", "cobra.model.hcloud.Vgw"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudExtEPg", "From fvCtx (VRF) to cloudExtEPg", "cobra.model.cloud.ExtEPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudRegion", "From fvCtx (VRF) to CloudRegion", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHcloudCsr", "From fvCtx (VRF) to hcloudCsr (CSR)", "cobra.model.hcloud.Csr"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHCloudEndPoint", "From fvCtx (VRF) to hcloud:EndPoint", "cobra.model.hcloud.EndPoint"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToHCloudCtx", "From fvCtx (VRF) to hcloudCtx (VPC)", "cobra.model.hcloud.Ctx"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudCtxProfile", "From fvCtx (VRF) to cloudCtxProfile", "cobra.model.cloud.CtxProfile"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("FvCtxToCloudEPg", "From fvCtx (VRF) to cloud EPg", "cobra.model.cloud.EPg"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToRegion", "Vrf to cloud Region", "cobra.model.cloud.Region"))
meta.deploymentQueryPaths.append(DeploymentPathMeta("CtxToNwIf", "Private Network to Interface", "cobra.model.nw.If"))
def __init__(self, parentMoOrDn, markDirty=True, **creationProps):
namingVals = []
Mo.__init__(self, parentMoOrDn, markDirty, *namingVals, **creationProps)
# End of package file
# ##################################################
| [
"[email protected]"
] | |
17480f6f7565ee7617480674a2fadf166e70810a | 1f8a47641cb1c987f70dd7cf502d49e07ded52af | /backend/hss/hss/wsgi.py | 1de8e14bb33a901652171821ff9561d31b1a8d19 | [] | no_license | do-park/shawcheckredemption | 0fda66e3c1958b1ea27258de2da51b6bb9ce92ef | 8267e4d4ce4e815600bb4c21f7df878c8807d645 | refs/heads/ft_front | 2023-01-19T16:58:44.680144 | 2020-11-26T08:02:21 | 2020-11-26T08:02:21 | 316,159,702 | 1 | 1 | null | 2020-11-26T08:02:22 | 2020-11-26T07:55:14 | null | UTF-8 | Python | false | false | 383 | py | """
WSGI config for hss project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hss.settings')
application = get_wsgi_application()
| [
"[email protected]"
] | |
bdd0760e8844fd6ba461b3318c1347dc4022acd9 | b090cb9bc30ac595675d8aa253fde95aef2ce5ea | /trunk/test/NightlyRun/test405.py | 4234b88e576ef0f06697b7c02f12c1d1579361dc | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | eyhl/issm | 5ae1500715c258d7988e2ef344c5c1fd15be55f7 | 1013e74c28ed663ebb8c9d398d9be0964d002667 | refs/heads/master | 2022-01-05T14:31:23.235538 | 2019-01-15T13:13:08 | 2019-01-15T13:13:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 931 | py | #Test Name: SquareSheetShelfStressMHOPenalties
import numpy as np
from model import *
from socket import gethostname
from triangle import *
from setmask import *
from parameterize import *
from setflowequation import *
from solve import *
md=triangle(model(),'../Exp/Square.exp',180000.)
md=setmask(md,'../Exp/SquareShelf.exp','')
md=parameterize(md,'../Par/SquareSheetShelf.py')
md.extrude(5,1.)
md=setflowequation(md,'SSA','../Exp/SquareHalfRight.exp','fill','HO','coupling','penalties')
md.cluster=generic('name',gethostname(),'np',3)
md=solve(md,'Stressbalance')
#Fields and tolerances to track changes
field_names =['Vx','Vy','Vz','Vel','Pressure']
field_tolerances=[5e-05,5e-05,5e-05,5e-05,1e-05]
field_values=[\
md.results.StressbalanceSolution.Vx,\
md.results.StressbalanceSolution.Vy,\
md.results.StressbalanceSolution.Vz,\
md.results.StressbalanceSolution.Vel,\
md.results.StressbalanceSolution.Pressure,\
]
| [
"[email protected]"
] | |
147eafbcdb47571b8ec157075995bcb513a53efa | f167dffa2f767a0419aa82bf434852069a8baeb8 | /lib/youtube_dl/extractor/kankan.py | a677ff44712794ef54f53a1afe9c55fbacad91e2 | [
"MIT"
] | permissive | firsttris/plugin.video.sendtokodi | d634490b55149adfdcb62c1af1eb77568b8da3f5 | 1095c58e2bc21de4ab6fcb67a70e4f0f04febbc3 | refs/heads/master | 2023-08-18T10:10:39.544848 | 2023-08-15T17:06:44 | 2023-08-15T17:06:44 | 84,665,460 | 111 | 31 | MIT | 2022-11-11T08:05:21 | 2017-03-11T16:53:06 | Python | UTF-8 | Python | false | false | 1,738 | py | from __future__ import unicode_literals
import re
import hashlib
from .common import InfoExtractor
_md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
class KankanIE(InfoExtractor):
_VALID_URL = r'https?://(?:.*?\.)?kankan\.com/.+?/(?P<id>\d+)\.shtml'
_TEST = {
'url': 'http://yinyue.kankan.com/vod/48/48863.shtml',
'md5': '29aca1e47ae68fc28804aca89f29507e',
'info_dict': {
'id': '48863',
'ext': 'flv',
'title': 'Ready To Go',
},
'skip': 'Only available from China',
}
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
title = self._search_regex(r'(?:G_TITLE=|G_MOVIE_TITLE = )[\'"](.+?)[\'"]', webpage, 'video title')
surls = re.search(r'surls:\[\'.+?\'\]|lurl:\'.+?\.flv\'', webpage).group(0)
gcids = re.findall(r'http://.+?/.+?/(.+?)/', surls)
gcid = gcids[-1]
info_url = 'http://p2s.cl.kankan.com/getCdnresource_flv?gcid=%s' % gcid
video_info_page = self._download_webpage(
info_url, video_id, 'Downloading video url info')
ip = self._search_regex(r'ip:"(.+?)"', video_info_page, 'video url ip')
path = self._search_regex(r'path:"(.+?)"', video_info_page, 'video url path')
param1 = self._search_regex(r'param1:(\d+)', video_info_page, 'param1')
param2 = self._search_regex(r'param2:(\d+)', video_info_page, 'param2')
key = _md5('xl_mp43651' + param1 + param2)
video_url = 'http://%s%s?key=%s&key1=%s' % (ip, path, key, param2)
return {
'id': video_id,
'title': title,
'url': video_url,
}
| [
"[email protected]"
] | |
93676b1ccd5b8f20a34473032ab5d4db03956a52 | 6b2a8dd202fdce77c971c412717e305e1caaac51 | /solutions_5634947029139456_1/Python/AlonH/2014A1A.py | a02bf27c55c7c31dc651a2d0b8c8393d949d0274 | [] | no_license | alexandraback/datacollection | 0bc67a9ace00abbc843f4912562f3a064992e0e9 | 076a7bc7693f3abf07bfdbdac838cb4ef65ccfcf | refs/heads/master | 2021-01-24T18:27:24.417992 | 2017-05-23T09:23:38 | 2017-05-23T09:23:38 | 84,313,442 | 2 | 4 | null | null | null | null | UTF-8 | Python | false | false | 1,292 | py | def match(now,needed):
now.sort()
needed.sort()
for i in range(len(now)):
if now[i] != needed[i]:
return True
return False
def count(ar,l):
ret = [0]*l
for i in range(l):
for s in ar:
ret[i] += int(s[i])
return(ret)
def compare(n,o,l):
ret = [0]*l
for i in range(l):
if n[i] != o[i]:
ret[i] = 1
return tuple(ret)
f = open("A-large.in","r")
o = open("A-large-answers.txt","w")
T = int(f.readline())
for t in range(1,T+1):
inp = [int(a) for a in f.readline().split()]
n = inp[0]
l = inp[1]
lifts = [0]*l
start = [a for a in f.readline().split()]
needed = [a for a in f.readline().split()]
cnow = count(start,l)
cneeded = count(needed,l)
print("case",t,cnow,cneeded,start,needed)
op = set([compare(start[0],n,l) for n in needed])
for i in range(1,n):
op1 = set([compare(start[i],n,l) for n in needed])
op = op&op1
if len(op) == 0:
o.write("Case #"+str(t)+": NOT POSSIBLE"+"\n")
else:
o.write("Case #"+str(t)+": "+str(min([a.count(1) for a in op]))+"\n")
o.close()
#o.write("Case #"+str(t)+": NOT POSSIBLE"+"\n")
#o.write("Case #"+str(t)+": "+str(lifts.count(1))+"\n")
| [
"[email protected]"
] | |
48d5dad900ecdf584f2ec639d5b62ce8f06d2c2c | 82074ba616918ede605dec64b038546a7b07bd7d | /empowerb/middleware.py | 85764001431cfbf3cedb9002fb6e1ccf8f38b859 | [] | no_license | chetankhopade/EmpowerRM | b7ab639eafdfa57c054a0cf9da15c3d4b90bbd66 | 8d968592f5e0d160c56f31a4870e79c30240b514 | refs/heads/main | 2023-07-05T03:20:13.820049 | 2021-08-26T11:56:28 | 2021-08-26T11:56:28 | 399,354,317 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | from _contextvars import ContextVar
from django.utils.deprecation import MiddlewareMixin
from empowerb.settings import DATABASES
db_ctx = ContextVar('var')
class WhichDatabaseIsTOUseMIddleware(MiddlewareMixin):
"""
Middleware to update the context var with the db alias
"""
@staticmethod
def process_request(request):
try:
db_name_path = request.path.split('/')[1]
db_name = db_name_path.split('_')[0] if '_' in db_name_path else db_name_path
# set contextvar with the database name if dbname exist in DATABASES dict
db_ctx.set(db_name) if db_name in DATABASES.keys() else db_ctx.set('NoOP')
except Exception as ex:
print(ex.__str__())
            db_ctx.set('NoOP')  # ContextVar.reset() expects a Token; fall back to the sentinel with set()
| [
"[email protected]"
] | |
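
The middleware only *sets* the ContextVar; the consumer it implies is not included in this record. A hedged sketch of what that consumer could look like, as a Django database router listed in `DATABASE_ROUTERS` and reusing the `db_ctx` variable from the module above, is below (EmpowerRM's real router may differ).

```python
class ContextVarDBRouter:
    """Pick the per-request connection from the ContextVar set above."""

    def _alias(self):
        alias = db_ctx.get('NoOP')          # default when the var is unset
        return None if alias == 'NoOP' else alias

    def db_for_read(self, model, **hints):
        return self._alias()

    def db_for_write(self, model, **hints):
        return self._alias()
```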
628a26377a4ac11054ec002268c2916d3883eccf | 601e6891504cc9da063e3ef9993e7b5f142bbe35 | /examples/wifiStationsAndHosts.py | 8e68f22b85c8a2a65ee1968f5506555a2831560b | [] | no_license | caiqiqi/mininet-wifi | b8a13f83e4fbadea20865faecf6719abf8e68437 | 547cf3c01d85b9bfb38b3e9df3b5c52119b5b5e2 | refs/heads/master | 2021-01-20T16:44:34.270734 | 2016-05-16T12:55:56 | 2016-05-16T12:55:56 | 58,878,807 | 0 | 0 | null | 2016-05-15T19:01:01 | 2016-05-15T19:01:01 | null | UTF-8 | Python | false | false | 1,177 | py | #!/usr/bin/python
"""
This example shows how work with wireless and wired media
"""
from mininet.net import Mininet
from mininet.node import Controller, OVSKernelSwitch
from mininet.cli import CLI
from mininet.log import setLogLevel
from mininet.link import TCLink
def topology():
"Create a network."
net = Mininet( controller=Controller, link=TCLink, switch=OVSKernelSwitch )
print "*** Creating nodes"
ap1 = net.addBaseStation( 'ap1', ssid="simplewifi", mode="g", channel="5" )
sta1 = net.addStation( 'sta1', ip='192.168.0.1/24' )
sta2 = net.addStation( 'sta2', ip='192.168.0.2/24' )
h3 = net.addHost( 'h3', ip='192.168.0.3/24' )
h4 = net.addHost( 'h4', ip='192.168.0.4/24' )
c0 = net.addController('c0', controller=Controller, ip='127.0.0.1' )
print "*** Adding Link"
net.addLink(sta1, ap1)
net.addLink(sta2, ap1)
net.addLink(h3, ap1)
net.addLink(h4, ap1)
print "*** Starting network"
net.build()
c0.start()
ap1.start( [c0] )
print "*** Running CLI"
CLI( net )
print "*** Stopping network"
net.stop()
if __name__ == '__main__':
setLogLevel( 'info' )
topology()
| [
"[email protected]"
] | |
4f9cdc759c20a19b123b187ed82e7d01eb37bd48 | 8827574a663cc9d18194eb355dce7ffb676e6d0b | /everest/transit.py | 8958b3c59d794095e0ea42a5548d12f5aa0ef602 | [
"MIT"
] | permissive | mirca/everest | 70a79432f6cd2b604a64fc4c97c7513bbe2a6406 | b96cc5cd1949b81ddc49fb74b90bf5a95c6fca14 | refs/heads/master | 2021-01-13T05:56:00.206244 | 2017-03-17T16:35:49 | 2017-03-17T16:35:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,317 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`transit.py` - Transit models
-------------------------------------
These are routines used to generate a transit model, primarily for
transit injection/recovery tests. These are wrappers around
:py:func:`pysyzygy.Transit`, with the added feature that
the transit :py:obj:`depth` and the transit :py:obj:`duration` can be specified
as input variables (as opposed to the planet-star radius ratio
and the stellar density, which :py:mod:`pysyzygy` expects).
'''
from __future__ import division, print_function, absolute_import, unicode_literals
import numpy as np
import matplotlib.pyplot as pl
import pysyzygy as ps
from scipy.optimize import fmin
import logging
log = logging.getLogger(__name__)
class TransitModel(object):
'''
'''
def __init__(self, name, sig_RpRs = 0.001, **kwargs):
'''
'''
# The planet/transit model ID
assert type(name) is str, "Arg `name` must be a string."
self.name = name
# The transit model
self._transit = ps.Transit(**kwargs)
# Compute the depth
times = kwargs.get('times', None)
if times is not None:
t0 = times[0]
else:
t0 = kwargs.get('t0', 0.)
self.depth = (1. - self._transit([t0]))[0]
# Approximate variance on the depth
self.var_depth = (2 * sig_RpRs) ** 2
# Save the kwargs
self.params = kwargs
def __call__(self, time):
'''
'''
model = (self._transit(time) - 1) / self.depth
return model
def Get_RpRs(d, **kwargs):
'''
Returns the value of the planet radius over the stellar radius for a given depth :py:obj:`d`, given
the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
'''
def Depth(RpRs, **kwargs):
return 1 - ps.Transit(RpRs = RpRs, **kwargs)([kwargs.get('t0', 0.)])
def DiffSq(r):
return 1.e10 * (d - Depth(r, **kwargs)) ** 2
return fmin(DiffSq, [np.sqrt(d)], disp = False)
def Get_rhos(dur, **kwargs):
'''
Returns the value of the stellar density for a given transit duration :py:obj:`dur`, given
the :py:class:`everest.pysyzygy` transit :py:obj:`kwargs`.
'''
assert dur >= 0.01 and dur <= 0.5, "Invalid value for the duration."
def Dur(rhos, **kwargs):
t0 = kwargs.get('t0', 0.)
time = np.linspace(t0 - 0.5, t0 + 0.5, 1000)
try:
t = time[np.where(ps.Transit(rhos = rhos, **kwargs)(time) < 1)]
except:
return 0.
return t[-1] - t[0]
def DiffSq(rhos):
return (dur - Dur(rhos, **kwargs)) ** 2
return fmin(DiffSq, [0.2], disp = False)
def Transit(time, t0 = 0., dur = 0.1, per = 3.56789, depth = 0.001, **kwargs):
'''
A `Mandel-Agol <http://adsabs.harvard.edu/abs/2002ApJ...580L.171M>`_ transit model,
but with the depth and the duration as primary input variables.
:param numpy.ndarray time: The time array
:param float t0: The time of first transit in units of :py:obj:`BJD` - 2454833.
:param float dur: The transit duration in days. Don't go too crazy on this one -- very small \
or very large values will break the inverter. Default 0.1
:param float per: The orbital period in days. Default 3.56789
:param float depth: The fractional transit depth. Default 0.001
:param dict kwargs: Any additional keyword arguments, passed directly to :py:func:`everest.pysyzygy.Transit`
:returns tmod: The transit model evaluated at the same times as the :py:obj:`time` array
'''
# Note that rhos can affect RpRs, so we should really do this iteratively,
# but the effect is pretty negligible!
RpRs = Get_RpRs(depth, t0 = t0, per = per, **kwargs)
rhos = Get_rhos(dur, t0 = t0, per = per, **kwargs)
return ps.Transit(t0 = t0, per = per, RpRs = RpRs, rhos = rhos, **kwargs)(time)
class TransitShape(object):
'''
'''
def __init__(self, depth = 1, window = 0.5, **kwargs):
'''
'''
kwargs.pop('t0', None)
kwargs.pop('times', None)
t = np.linspace(-window / 2, window / 2, 5000)
trn = ps.Transit(t0 = 0., **kwargs)
transit_model = trn(t)
transit_model -= 1
transit_model *= depth / (1 - trn([0.])[0])
self.x = t
self.y = transit_model
def __call__(self, time, t0 = 0.):
'''
'''
return np.interp(time, self.x + t0, self.y) | [
"[email protected]"
] | |
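
An illustrative call of the wrapper above (not from the package docs): it assumes `pysyzygy` is installed and that the module is importable as `everest.transit`, and the numbers are arbitrary but respect the duration assert in `Get_rhos` (0.01 to 0.5 days).

```python
import numpy as np
from everest.transit import Transit

t = np.arange(0.0, 10.0, 1.0 / 48.0)   # ten days at 30-minute cadence
flux = Transit(t, t0=1.0, dur=0.12, per=3.5, depth=0.002)
# flux is ~1 out of transit and dips by ~0.2% for ~0.12 d every 3.5 d
```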
14679579a6dbc0f503f5b3d8562401165ce94756 | 91deb97afda334c5366e560325995cf6b5407bee | /src/command_modules/azure-cli-billing/azure/cli/command_modules/billing/custom.py | 21965f255ffa9f5644f22068a40961f8ca75b5a3 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | viananth/azure-cli | ab117c1b0b676026cbb57567544cd70630efe830 | 4d23492ed03e946cfc11bae23b29acb971fb137d | refs/heads/master | 2021-05-23T05:13:51.414113 | 2017-08-17T16:58:10 | 2017-08-17T16:58:10 | 95,239,804 | 0 | 0 | NOASSERTION | 2019-03-19T18:45:16 | 2017-06-23T17:01:34 | Python | UTF-8 | Python | false | false | 891 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
def cli_billing_list_invoices(client, generate_url=False):
"""List all available invoices of the subscription"""
invoices = client.list(expand='downloadUrl' if generate_url else None)
return list(invoices)
def cli_billing_get_invoice(client, name=None):
"""Retrieve invoice of specific name of the subscription"""
if name:
return client.get(name)
return client.get_latest()
def cli_billing_list_periods(client):
"""List all available billing periods of the subscription"""
return list(client.list())
| [
"[email protected]"
] | |
1390b2d3b283c49021827414a5f0ca6601dd27e8 | 1cfafec5935522b386d40ab7bb7246f39da89fcc | /temp/20201221_naver_ai_handsonsummit.py | ff8784adba43b2a7a15adeb0447977ce5373c919 | [] | no_license | madfalc0n/my_coding_labs | 0d9e13e2d1579607d5481c6a78baa70a2c7c374a | b38fd988a5e3ebb8d8b66bf5a0b15eb3eaa20578 | refs/heads/master | 2021-07-03T17:33:16.801207 | 2021-06-18T06:24:09 | 2021-06-18T06:24:09 | 241,097,976 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import sys
import requests
client_id = "CID"
client_secret = "CSECRET"
lang = "Kor"  # language code (Kor, Jpn, Eng, Chn)
url = "https://naveropenapi.apigw.ntruss.com/recog/v1/stt?lang=" + lang
data = open('filepath', 'rb')
headers = {
"X-NCP-APIGW-API-KEY-ID": client_id,
"X-NCP-APIGW-API-KEY": client_secret,
"Content-Type": "application/octet-stream"
}
response = requests.post(url, data=data, headers=headers)
rescode = response.status_code
if(rescode == 200):
print (response.text)
else:
print("Error : " + response.text)
| [
"[email protected]"
] | |
67558e1d4c168ae6ffe706cae7b73d5b96991949 | 91d1a6968b90d9d461e9a2ece12b465486e3ccc2 | /ec2_write_1/client-vpn-client-certificate-revocation-list_export.py | 590e1a0b6582eaa6d0a43363fd3ba344c40e4825 | [] | no_license | lxtxl/aws_cli | c31fc994c9a4296d6bac851e680d5adbf7e93481 | aaf35df1b7509abf5601d3f09ff1fece482facda | refs/heads/master | 2023-02-06T09:00:33.088379 | 2020-12-27T13:38:45 | 2020-12-27T13:38:45 | 318,686,394 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,091 | py | #!/usr/bin/python
# -*- codding: utf-8 -*-
import os
import sys
sys.path.append(os.path.dirname(os.path.abspath(os.path.dirname(__file__))))
from common.execute_command import write_one_parameter
# url : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/export-client-vpn-client-certificate-revocation-list.html
if __name__ == '__main__':
"""
import-client-vpn-client-certificate-revocation-list : https://awscli.amazonaws.com/v2/documentation/api/latest/reference/ec2/import-client-vpn-client-certificate-revocation-list.html
"""
parameter_display_string = """
# client-vpn-endpoint-id : The ID of the Client VPN endpoint.
"""
add_option_dict = {}
#######################################################################
# parameter display string
add_option_dict["parameter_display_string"] = parameter_display_string
# ex: add_option_dict["no_value_parameter_list"] = "--single-parameter"
write_one_parameter("ec2", "export-client-vpn-client-certificate-revocation-list", "client-vpn-endpoint-id", add_option_dict)
| [
"[email protected]"
] | |
4c6a4945f123306bcdf31d8b8f17939c2b32cc2f | 094304d0aa7cb6949c0f471d1c432dc7db5a4c2a | /VIRSCAN/vir_scan_db.py | 1358ecd8d77cb2b4438e1bd9cfcaacc0392ee70c | [] | no_license | smallmeet/fangzhen | 7f8e232b87841b88268d14133479846e48e33ba1 | d0cbf09eba98c835a4ea013889a8cf0b34263d0d | refs/heads/master | 2020-12-24T22:28:54.972613 | 2016-08-12T09:24:15 | 2016-08-12T09:24:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,005 | py | from base_db import MysqlClient
from get_conf import GetConf
class VirScanMysqlClient:
def __init__(self, conf):
self.mysql_client = MysqlClient(conf)
def insert_vir_scan(self, args):
self.mysql_client.insert('PRO_VIR_SCAN_INSERT', args)
def insert_apk_black_list(self, args):
self.mysql_client.insert('PRO_APK_BLACK_LIST_INSERT', args)
def update_apk_black_list(self, args):
self.mysql_client.insert('PRO_APK_BLACK_LIST_UPDATE', args)
def select_vir_scan(self, args):
return self.mysql_client.select('PRO_VIR_SCAN_SELECT', args)
def select_apk_black_list_info(self, args):
return self.mysql_client.select('PRO_APK_BLACK_LIST_SELECT', args)
def fetch_apk_black_list_info(self, args):
return self.mysql_client.select('PRO_APK_BLACK_LIST_FETCH', args)
if __name__ == '__main__':
get_conf = GetConf('')
mysql_client = VirScanMysqlClient(get_conf)
# mysql_client.get_app_info()
# mysql_client.insert_data()
| [
"[email protected]"
] | |
e80086c7681aba8a3e9db60de523efc0dda13b05 | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p02886/s458066707.py | c314b1e82e4800300ac6e420a37e53e4fca14534 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 98 | py | n=int(input())
d=list(map(int,input().split()))
d2=[m**2 for m in d]
print((sum(d)**2-sum(d2))//2) | [
"[email protected]"
] | |
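
The one-liner works because of the standard identity for the sum of pairwise products, with d_i the input values; the floor division is exact since the numerator is always even:

```latex
\sum_{i<j} d_i d_j = \frac{\left(\sum_i d_i\right)^2 - \sum_i d_i^2}{2}
```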
022be4db452f8ecc1c423f41fa77963d1855a30e | 9e21ee282d0a567b42a96f938f61d655eb2d5940 | /chat_room/tests/test_base.py | 1262db10f3b75ee65a9f2fb0e64f06f887ac4d2a | [] | no_license | smolynets/chat-interface | e0ac815639dd993f029f331a545c5c5932785569 | 3b66970c241eb1660b60a612aceffde36223eff4 | refs/heads/master | 2021-06-12T02:19:47.749561 | 2019-07-13T12:42:21 | 2019-07-13T12:42:21 | 191,516,912 | 0 | 0 | null | 2021-06-10T18:21:22 | 2019-06-12T07:06:21 | Python | UTF-8 | Python | false | false | 1,496 | py | """
This test is inherited by tests of other apps.
"""
from django.urls import reverse
from rest_framework.test import APIClient, APITestCase
from rest_framework_simplejwt.settings import api_settings
from ..models import User
class APIRestAuthJWTClient(APIClient):
"""
    APIClient that logs in through the JWT token endpoint and, on success,
    attaches the access token to every subsequent request.
"""
def login(self, login_name="login", **credentials):
"""
        Post credentials to the token endpoint; on success, store the
        access token in the Authorization header.
"""
login_endpoint = reverse(login_name)
login_response = self.post(login_endpoint, credentials, format="json")
if login_response.status_code == 200:
self.credentials(
HTTP_AUTHORIZATION="{0} {1}".format(
api_settings.defaults["AUTH_HEADER_TYPES"][0],
login_response.data["access"]
)
)
return True
else:
return False
class APITestBaseClass(APITestCase):
"""
    Base API test case, inherited by the other apps' tests; provides two users.
"""
def setUp(self):
"""
        Create two test users.
"""
self.user = User.objects.create_user(
username="test_user",
email="[email protected]",
password="password"
)
self.user_two = User.objects.create_user(
username="test2_user",
email="[email protected]",
password="password"
)
client_class = APIRestAuthJWTClient
| [
"[email protected]"
] | |
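
A sketch of how a child app's test module would typically use this base class (illustrative; it assumes a URL pattern named `login` pointing at the SimpleJWT token view, which this record does not show):

```python
from chat_room.tests.test_base import APITestBaseClass


class MessageEndpointTests(APITestBaseClass):
    def test_login_attaches_token(self):
        # on success the client adds "Bearer <access>" to later requests
        self.assertTrue(
            self.client.login(username="test_user", password="password")
        )
```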
1547ae20bcab955d0bc53826f0c25ebaf5c0ca77 | 163bbb4e0920dedd5941e3edfb2d8706ba75627d | /Code/CodeRecords/2645/60768/316508.py | 65bd1d2918995f2d545307313de4c81f073d0279 | [] | no_license | AdamZhouSE/pythonHomework | a25c120b03a158d60aaa9fdc5fb203b1bb377a19 | ffc5606817a666aa6241cfab27364326f5c066ff | refs/heads/master | 2022-11-24T08:05:22.122011 | 2020-07-28T16:21:24 | 2020-07-28T16:21:24 | 259,576,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 481 | py | piles = eval(input())
h = int(input())
max_k = max(piles)
min_k = max(1, sum(piles) // h)  # k must be at least 1 or the loop below never terminates
re = max_k
for k in range(min_k, max_k + 1):
time = 0
bananas = [i for i in piles]
while len(bananas) > 0:
for i in range(len(bananas)):
bananas[i] = bananas[i] - k
time += 1
if bananas[i] < 0:
bananas[i] = 0
while 0 in bananas:
bananas.remove(0)
if time <= h:
re = k
break
print(re) | [
"[email protected]"
] | |
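
The simulation above re-runs the whole schedule for every candidate `k`. The usual refinement is to binary-search the answer, since feasibility is monotone in `k`; a sketch of that alternative (same inputs, not part of the original submission):

```python
import math

def min_eating_speed(piles, h):
    lo, hi = 1, max(piles)
    while lo < hi:
        mid = (lo + hi) // 2
        hours = sum(math.ceil(p / mid) for p in piles)
        if hours <= h:
            hi = mid          # mid is fast enough; try slower
        else:
            lo = mid + 1      # too slow; speed up
    return lo
```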
3c78d94f96313789cdcab22a4e37af4d6683944f | dcda5ba16474dd8ff650e04e7f4a9bf700f6a9ff | /manage.py | e4582d9147c1e546b5dee4a9c82e5fcceb52ac75 | [] | no_license | 007vict/shopbyexample | 2084d6e53faafb5c7e856cc8b3a5ff43bc3a82e2 | bc7dcfe5818499731c3cbf956c9c0b95cf3791da | refs/heads/master | 2022-12-21T13:05:08.425653 | 2019-04-10T10:30:41 | 2019-04-10T10:30:41 | 177,291,341 | 0 | 0 | null | 2022-12-08T04:58:00 | 2019-03-23T13:18:59 | JavaScript | UTF-8 | Python | false | false | 547 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'myshopbyexample.settings')
try:
from django.core.management import execute_from_command_line
except ImportError as exc:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
) from exc
execute_from_command_line(sys.argv)
| [
"[email protected]"
] | |
8a6f357264b2dbc8114efa7cb34e8a02b9be2820 | 63b814265ab49ebc2ed8e62577757991119be83b | /data-quality/kalman-filt.py | d10a0e87d25b724b1f8289d92c13e7a3168ac9bd | [] | no_license | wisecg/mjd-analysis | 7de4e67c34c19215984f528f31f71a8e584e1e91 | ca4f00a767f2dfe6d460b44c700e2b59fe0bb296 | refs/heads/master | 2020-12-07T21:28:34.376478 | 2017-08-28T15:20:17 | 2017-08-28T15:20:17 | 65,919,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,108 | py | """
The idea is to take plots like "gmax" and "gbase" in data-quality.cc
and apply a Kalman filter to them, to look for "extrema"
http://scipy-cookbook.readthedocs.io/items/KalmanFiltering.html
Intro to Kalman filters:
http://www.cs.unc.edu/~welch/media/pdf/kalman_intro.pdf
Ben says:
everything is “basically linear,” which in electronics engineering speak means made of gaussians
so you model it with a bunch of kalman filters
and that gives you a statistically robust way to look for discontinuities or other jumps
its how they monitor parameters at like a gas turbine plant or jet engine or shit like that
its called fault detection and is a big component of controls engineering
its like wildly unexciting
but sort of cool math
but, like, say you want to monitor stability of a peak or whatever
you can make a bunch of plots of that peak position and look at them by eye
or you can have a filter that looks at the position vs time and says WOAH WTF BRO if it jumps
kalman filters are markov chain way to do that
and you know we roll markov style up in this bitch
same with rates or whatever
""" | [
"[email protected]"
] | |
5509f28877444ba0ac97b513a2106dbc9ddd0995 | ea0c0b8d67a42086f840149b3dbe1c0e4f58e56f | /members_area/migrations/0005_auto_20200129_2122.py | 12832e8dc63b3301b343c645b65d640d95c3d93b | [
"MIT"
] | permissive | AzeezBello/raodoh | 78b27e0886f8882144a4def160d9c3f53bcc6af9 | 296bd44069bd750557bf49995374601f5052d695 | refs/heads/master | 2022-05-03T05:07:21.632642 | 2020-02-26T10:16:08 | 2020-02-26T10:16:08 | 235,878,080 | 0 | 0 | MIT | 2022-04-22T23:01:27 | 2020-01-23T20:15:39 | JavaScript | UTF-8 | Python | false | false | 481 | py | # Generated by Django 2.2.9 on 2020-01-29 20:22
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('members_area', '0004_auto_20200128_2330'),
]
operations = [
migrations.AlterField(
model_name='lesson',
name='course',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='members_area.Course'),
),
]
| [
"[email protected]"
] | |
656bb960ef1d2fd531df0a667c4d97135b95bcb1 | dd5ee6d1e88527cd22f1b64443320ba8ef751b59 | /rlcard3/envs/mocsar.py | 2b3edd2b9df42da117998cd3dd3b41bf88e15885 | [
"MIT"
] | permissive | sorata2894/rlcard3 | 42a2587e3ab00f3a33c684fb76efbc334a835359 | e9bbd36b789e670f96622a3a2ba8327f0d897561 | refs/heads/master | 2022-11-05T00:08:10.809055 | 2020-06-11T03:28:41 | 2020-06-11T03:28:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,854 | py | """
Mocsár Environment
File name: envs/gmocsar.py
Author: József Varga
Date created: 3/27/2020
"""
from rlcard3 import models
from rlcard3.envs.env import Env
from rlcard3.games.mocsar.game import MocsarGame as Game
from rlcard3.games.mocsar.utils import action_to_string, \
string_to_action, payoff_func, print_state, encode_to_obs
from typing import List
class MocsarEnv(Env):
""" GinRummy Environment
"""
state_shape: List[int] # Dimensions of state numpy array
def __init__(self, config):
self.game = Game()
self.state_shape = [3, 9, 14]
super().__init__(config=config)
def _extract_state(self, state): # 200213 don't use state ???
"""
Extract useful information from state for RL. Must be implemented in the child class.
        Returns a numpy array of shape (3, 9, 14) with 0/1 entries, read as
        [x][y][z]: a 1 means the hand contains y copies of the card whose
        rank is z.
        x=0: the player's own hand
        x=1: the other players' hands
        x=2: the target
        x>2: history (not implemented)
:param state: dict, the raw state
:return: dict: 'obs':the extracted state, numpy.array, 'legal_actions': list of actions
"""
obs = encode_to_obs(state=state)
extracted_state = {'obs': obs,
'legal_actions': self._get_legal_actions(),
                           'is_extract': True  # flag: the state has been extracted
}
return extracted_state
def get_payoffs(self):
"""
Get the payoffs of players. Must be implemented in the child class.
        The first finisher scores 1 and the last scores 0; the i-th finisher scores 0.5 ** i.
:return: A list of payoffs for each player.
"""
num_players = self.game.num_players
        # self.game.players.winners holds the player ids in finishing order
        # List indexed by player id instead of finishing order, e.g. [1, 3, 2, 0]
        win_id = [self.game.players.winners.index(i) for i in range(num_players)]
        # win_id[i] is player i's finishing position, e.g. [3, 0, 2, 1] means
        # player 0 finished last (position 3)
payoffs = [payoff_func(position=win_id[i], num_players=num_players) for i in range(num_players)]
return payoffs
def _decode_action(self, action_id):
"""
Decode Action id to the action in the game.
:param action_id: The id of the action
:return: The action that will be passed to the game engine.
"""
return action_to_string(action=action_id)
def _get_legal_actions(self):
"""
Get all legal actions for current state.
:return: A list of legal actions' id.
"""
return [string_to_action(action) for action in self.game.get_legal_actions()]
def _load_model(self):
"""
Load pretrained/rule model
:return: A Model object
"""
return models.load('mocsar-rule-v1', num_players=self.game.get_player_num())
def print_state(self, player: int):
"""
Print out the state of a given player
:param player: Player Id to print
"""
state = self.game.get_state(player)
print_state(state)
def print_result(self, player):
"""
Print the game result when the game is over
        :param player: unused; results are printed for every player
"""
payoffs = self.get_payoffs()
for player_ in self.game.players.players:
print(f"Player {player_.__str__()} : points {payoffs[player_.player_id]}")
@staticmethod
def print_action(action: str):
"""
Print out an action in a nice form
:param action: Code of the action
"""
if type(action) is tuple:
action, _ = action
print(f"\nAction code:{string_to_action(action)}, action:{action}")
| [
"[email protected]"
] | |
afa233f76cb5afeb5878c1f8371c6ee8b5e88667 | 5ed795f324b1f94ded479a22f60580d9f41a114b | /dashboard/migrations/0007_auto_20190212_1753.py | 231eeaf5940eeed0afb26eda070c777986ca996d | [] | no_license | ashutoshdev/Django-Main-news-wesbite | 907f52a131e136072a585c903c906adb19457765 | 9a934255465d73ab12e16031fb99ad5847b65b55 | refs/heads/master | 2023-08-23T20:27:40.286701 | 2021-10-21T02:03:49 | 2021-10-21T02:03:49 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | # Generated by Django 2.0 on 2019-02-12 12:23
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
dependencies = [
('dashboard', '0006_rfcompany'),
]
operations = [
migrations.CreateModel(
name='DashboardCompany',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('companyname', models.TextField(default='', verbose_name='companyname')),
('bannerloads', models.PositiveIntegerField(default=0)),
('clicks', models.PositiveIntegerField(default=0)),
('date', models.DateField(auto_now_add=True)),
('time', models.TimeField(auto_now_add=True)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
migrations.AddField(
model_name='rfcompany',
name='date',
field=models.DateField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
migrations.AddField(
model_name='rfcompany',
name='time',
field=models.TimeField(auto_now_add=True, default=django.utils.timezone.now),
preserve_default=False,
),
]
| [
"[email protected]"
] | |
76faf3fd6ac8be77e3a1174cf85ff9d069e4638a | 96c970ebacd9ade1493f4d01537005788b43a49b | /pychron/experiment/tasks/experiment_actions.py | 1449ab2015d4a6e355f16e188813b9cb6753b314 | [
"Apache-2.0"
] | permissive | OSUPychron/pychron | d2da9051b68024200d0009de634da810ccef2a0d | fe0ba9daff9548fa8bebab26db66a1cefff7c1d6 | refs/heads/master | 2021-01-14T12:47:26.389887 | 2015-12-18T22:27:02 | 2015-12-18T22:27:02 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,537 | py | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
from pyface.message_dialog import warning
from pyface.tasks.task_window_layout import TaskWindowLayout
from pychron.core.helpers.filetools import get_path
from pychron.envisage.tasks.actions import PAction as Action, PTaskAction as TaskAction
# ============= standard library imports ========================
import os
# ============= local library imports ==========================
from pychron.envisage.resources import icon
from pychron.paths import paths
EXP_ID = 'pychron.experiment.task'
class ResetSystemHealthAction(Action):
name = 'Reset System Health'
dname = 'Reset System Health'
def perform(self, event):
from pychron.experiment.health.series import reset_system_health_series
reset_system_health_series()
class ExperimentAction(Action):
task_id = EXP_ID
# def _get_experimentor(self, event):
# return self._get_service(event, 'pychron.experiment.experimentor.Experimentor')
def _get_service(self, event, name):
app = event.task.window.application
return app.get_service(name)
def _open_editor(self, event):
application = event.task.window.application
application.open_task(self.task_id)
class ConfigureEditorTableAction(TaskAction):
name = 'Configure Experiment Table'
dname = 'Configure Experiment Table'
method = 'configure_experiment_table'
class BasePatternAction(TaskAction):
_enabled = None
def _task_changed(self):
if self.task:
if hasattr(self.task, 'open_pattern'):
enabled = True
if self.enabled_name:
if self.object:
enabled = bool(self._get_attr(self.object,
self.enabled_name, False))
if enabled:
self._enabled = True
else:
self._enabled = False
def _enabled_update(self):
"""
reimplement ListeningAction's _enabled_update
"""
if self.enabled_name:
if self.object:
self.enabled = bool(self._get_attr(self.object,
self.enabled_name, False))
else:
self.enabled = False
elif self._enabled is not None:
self.enabled = self._enabled
else:
self.enabled = bool(self.object)
class OpenPatternAction(BasePatternAction):
name = 'Open Pattern...'
dname = 'Open Pattern'
method = 'open_pattern'
class NewPatternAction(BasePatternAction):
name = 'New Pattern...'
dname = 'New Pattern'
method = 'new_pattern'
class SendTestNotificationAction(TaskAction):
name = 'Send Test Notification'
dname = 'Send Test Notification'
method = 'send_test_notification'
# accelerator = 'Ctrl+Shift+N'
class DeselectAction(TaskAction):
name = 'Deselect'
dname = 'Deselect'
method = 'deselect'
tooltip = 'Deselect the selected run(s)'
id = 'pychron.deselect'
class UndoAction(TaskAction):
name = 'Undo'
dname = 'Undo'
method = 'undo'
accelerator = 'Ctrl+Z'
class QueueConditionalsAction(Action):
name = 'Edit Queue Conditionals'
dname = 'Edit Queue Conditionals'
def perform(self, event):
task = event.task
if hasattr(task, 'edit_queue_conditionals'):
# edit the current queue's conditionals
task.edit_queue_conditionals()
else:
# choose a conditionals file to edit
from pychron.experiment.conditional.conditionals_edit_view import edit_conditionals
dnames = None
spec = task.application.get_service(
'pychron.spectrometer.base_spectrometer_manager.BaseSpectrometerManager')
if spec:
dnames = spec.spectrometer.detector_names
edit_conditionals(None, detectors=dnames, app=task.application)
class SystemConditionalsAction(Action):
name = 'Edit System Conditionals'
dname = 'Edit System Conditionals'
def perform(self, event):
from pychron.experiment.conditional.conditionals_edit_view import edit_conditionals
task = event.task
dnames = None
spec = task.application.get_service(
'pychron.spectrometer.base_spectrometer_manager.BaseSpectrometerManager')
if spec:
dnames = spec.spectrometer.detector_names
p = get_path(paths.spectrometer_dir, '.*conditionals', ('.yaml','.yml'))
if p:
edit_conditionals(p, detectors=dnames, app=task.application)
else:
warning(None, 'No system conditionals file at {}'.format(p))
def open_experiment(event, path):
app = event.task.window.application
task = event.task
if task.id == EXP_ID:
task.open(path)
else:
task = app.get_task(EXP_ID, False)
if task.open(path):
task.window.open()
# class QueueAction(ExperimentAction):
# def _open_experiment(self, event, path=None):
# open_experiment(event, path)
class NewExperimentQueueAction(ExperimentAction):
description = 'Create a new experiment queue'
name = 'New Experiment'
dname = 'New Experiment'
id = 'pychron.new_experiment'
def perform(self, event):
if event.task.id == EXP_ID:
event.task.new()
else:
application = event.task.window.application
win = application.create_window(TaskWindowLayout(EXP_ID))
task = win.active_task
if task.new():
win.open()
class OpenExperimentHistoryAction(Action):
name = 'Experiment Launch History'
dname = 'Experiment Launch History'
def perform(self, event):
from pychron.experiment.experiment_launch_history import ExperimentLaunchHistory
elh = ExperimentLaunchHistory()
elh.load()
info = elh.edit_traits()
if info.result:
if elh.selected:
open_experiment(event, elh.selected.path)
class OpenLastExperimentQueueAction(ExperimentAction):
description = 'Open last executed experiment'
name = 'Open Last Experiment...'
dname = 'Open Last Experiment'
id = 'pychron.open_last_experiment'
def __init__(self, *args, **kw):
super(OpenLastExperimentQueueAction, self).__init__(*args, **kw)
self.enabled = bool(self._get_last_experiment())
def perform(self, event):
path = self._get_last_experiment()
if path:
open_experiment(event, path)
else:
warning(None, 'No last experiment available')
def _get_last_experiment(self):
if os.path.isfile(paths.last_experiment):
with open(paths.last_experiment, 'r') as rfile:
path = rfile.readline()
if os.path.isfile(path):
return path
class OpenExperimentQueueAction(ExperimentAction):
description = 'Open experiment'
name = 'Open Experiment...'
dname = 'Open Experiment'
image = icon('project-open')
id = 'pychron.open_experiment'
def perform(self, event):
path = '/Users/ross/Pychron_dev/experiments/Current Experiment.txt'
# path = '/Users/ross/Pychrondata_dev/experiments/test.txt'
open_experiment(event, path)
# ===============================================================================
# Utilities
# ===============================================================================
class SignalCalculatorAction(ExperimentAction):
name = 'Signal Calculator'
dname = 'Signal Calculator'
def perform(self, event):
obj = self._get_service(event, 'pychron.experiment.signal_calculator.SignalCalculator')
app = event.task.window.application
app.open_view(obj)
class ResetQueuesAction(TaskAction):
method = 'reset_queues'
name = 'Reset Queues'
dname = 'Reset Queues'
class LastAnalysisRecoveryAction(Action):
name = 'Recover Last Analysis'
dname = 'Recover Last Analysis'
def perform(self, event):
from pychron.experiment.analysis_recovery import AnalysisRecoverer
a = AnalysisRecoverer()
a.recover_last_analysis()
# ============= EOF ====================================
| [
"[email protected]"
] | |
632bcfd9791ccbdc1e14fd7487c231c3e8ccd408 | 29b6a856a81a47ebab7bfdba7fe8a7b845123c9e | /dingtalk/python/alibabacloud_dingtalk/conv_file_1_0/models.py | 4327631ff59f6b9e1c0b1d34a2c61fedcfc59b67 | [
"Apache-2.0"
] | permissive | aliyun/dingtalk-sdk | f2362b6963c4dbacd82a83eeebc223c21f143beb | 586874df48466d968adf0441b3086a2841892935 | refs/heads/master | 2023-08-31T08:21:14.042410 | 2023-08-30T08:18:22 | 2023-08-30T08:18:22 | 290,671,707 | 22 | 9 | null | 2021-08-12T09:55:44 | 2020-08-27T04:05:39 | PHP | UTF-8 | Python | false | false | 30,852 | py | # -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict
class GetSpaceHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetSpaceRequest(TeaModel):
def __init__(
self,
open_conversation_id: str = None,
union_id: str = None,
):
self.open_conversation_id = open_conversation_id
self.union_id = union_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.open_conversation_id is not None:
result['openConversationId'] = self.open_conversation_id
if self.union_id is not None:
result['unionId'] = self.union_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('openConversationId') is not None:
self.open_conversation_id = m.get('openConversationId')
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
return self
class GetSpaceResponseBodySpace(TeaModel):
def __init__(
self,
corp_id: str = None,
create_time: str = None,
modified_time: str = None,
space_id: str = None,
):
self.corp_id = corp_id
self.create_time = create_time
self.modified_time = modified_time
self.space_id = space_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.corp_id is not None:
result['corpId'] = self.corp_id
if self.create_time is not None:
result['createTime'] = self.create_time
if self.modified_time is not None:
result['modifiedTime'] = self.modified_time
if self.space_id is not None:
result['spaceId'] = self.space_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('corpId') is not None:
self.corp_id = m.get('corpId')
if m.get('createTime') is not None:
self.create_time = m.get('createTime')
if m.get('modifiedTime') is not None:
self.modified_time = m.get('modifiedTime')
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
return self
class GetSpaceResponseBody(TeaModel):
def __init__(
self,
space: GetSpaceResponseBodySpace = None,
):
self.space = space
def validate(self):
if self.space:
self.space.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.space is not None:
result['space'] = self.space.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('space') is not None:
temp_model = GetSpaceResponseBodySpace()
self.space = temp_model.from_map(m['space'])
return self
class GetSpaceResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
status_code: int = None,
body: GetSpaceResponseBody = None,
):
self.headers = headers
self.status_code = status_code
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.status_code, 'status_code')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.status_code is not None:
result['statusCode'] = self.status_code
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('statusCode') is not None:
self.status_code = m.get('statusCode')
if m.get('body') is not None:
temp_model = GetSpaceResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class SendHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SendRequest(TeaModel):
def __init__(
self,
dentry_id: str = None,
open_conversation_id: str = None,
space_id: str = None,
union_id: str = None,
):
self.dentry_id = dentry_id
self.open_conversation_id = open_conversation_id
self.space_id = space_id
self.union_id = union_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dentry_id is not None:
result['dentryId'] = self.dentry_id
if self.open_conversation_id is not None:
result['openConversationId'] = self.open_conversation_id
if self.space_id is not None:
result['spaceId'] = self.space_id
if self.union_id is not None:
result['unionId'] = self.union_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('dentryId') is not None:
self.dentry_id = m.get('dentryId')
if m.get('openConversationId') is not None:
self.open_conversation_id = m.get('openConversationId')
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
return self
class SendResponseBodyFile(TeaModel):
def __init__(
self,
conversation_id: str = None,
create_time: str = None,
creator_id: str = None,
extension: str = None,
id: str = None,
modified_time: str = None,
modifier_id: str = None,
name: str = None,
parent_id: str = None,
path: str = None,
size: int = None,
space_id: str = None,
status: str = None,
type: str = None,
uuid: str = None,
version: int = None,
):
self.conversation_id = conversation_id
self.create_time = create_time
self.creator_id = creator_id
self.extension = extension
self.id = id
self.modified_time = modified_time
self.modifier_id = modifier_id
self.name = name
self.parent_id = parent_id
self.path = path
self.size = size
self.space_id = space_id
self.status = status
self.type = type
self.uuid = uuid
self.version = version
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.conversation_id is not None:
result['conversationId'] = self.conversation_id
if self.create_time is not None:
result['createTime'] = self.create_time
if self.creator_id is not None:
result['creatorId'] = self.creator_id
if self.extension is not None:
result['extension'] = self.extension
if self.id is not None:
result['id'] = self.id
if self.modified_time is not None:
result['modifiedTime'] = self.modified_time
if self.modifier_id is not None:
result['modifierId'] = self.modifier_id
if self.name is not None:
result['name'] = self.name
if self.parent_id is not None:
result['parentId'] = self.parent_id
if self.path is not None:
result['path'] = self.path
if self.size is not None:
result['size'] = self.size
if self.space_id is not None:
result['spaceId'] = self.space_id
if self.status is not None:
result['status'] = self.status
if self.type is not None:
result['type'] = self.type
if self.uuid is not None:
result['uuid'] = self.uuid
if self.version is not None:
result['version'] = self.version
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('conversationId') is not None:
self.conversation_id = m.get('conversationId')
if m.get('createTime') is not None:
self.create_time = m.get('createTime')
if m.get('creatorId') is not None:
self.creator_id = m.get('creatorId')
if m.get('extension') is not None:
self.extension = m.get('extension')
if m.get('id') is not None:
self.id = m.get('id')
if m.get('modifiedTime') is not None:
self.modified_time = m.get('modifiedTime')
if m.get('modifierId') is not None:
self.modifier_id = m.get('modifierId')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('parentId') is not None:
self.parent_id = m.get('parentId')
if m.get('path') is not None:
self.path = m.get('path')
if m.get('size') is not None:
self.size = m.get('size')
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('type') is not None:
self.type = m.get('type')
if m.get('uuid') is not None:
self.uuid = m.get('uuid')
if m.get('version') is not None:
self.version = m.get('version')
return self
class SendResponseBody(TeaModel):
def __init__(
self,
file: SendResponseBodyFile = None,
):
self.file = file
def validate(self):
if self.file:
self.file.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.file is not None:
result['file'] = self.file.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('file') is not None:
temp_model = SendResponseBodyFile()
self.file = temp_model.from_map(m['file'])
return self
class SendResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
status_code: int = None,
body: SendResponseBody = None,
):
self.headers = headers
self.status_code = status_code
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.status_code, 'status_code')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.status_code is not None:
result['statusCode'] = self.status_code
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('statusCode') is not None:
self.status_code = m.get('statusCode')
if m.get('body') is not None:
temp_model = SendResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class SendByAppHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SendByAppRequest(TeaModel):
def __init__(
self,
dentry_id: str = None,
space_id: str = None,
union_id: str = None,
):
self.dentry_id = dentry_id
self.space_id = space_id
self.union_id = union_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dentry_id is not None:
result['dentryId'] = self.dentry_id
if self.space_id is not None:
result['spaceId'] = self.space_id
if self.union_id is not None:
result['unionId'] = self.union_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('dentryId') is not None:
self.dentry_id = m.get('dentryId')
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
return self
class SendByAppResponseBodyFile(TeaModel):
def __init__(
self,
conversation_id: str = None,
create_time: str = None,
creator_id: str = None,
extension: str = None,
id: str = None,
modified_time: str = None,
modifier_id: str = None,
name: str = None,
parent_id: str = None,
path: str = None,
size: int = None,
space_id: str = None,
status: str = None,
type: str = None,
uuid: str = None,
version: int = None,
):
self.conversation_id = conversation_id
self.create_time = create_time
self.creator_id = creator_id
self.extension = extension
self.id = id
self.modified_time = modified_time
self.modifier_id = modifier_id
self.name = name
self.parent_id = parent_id
self.path = path
self.size = size
self.space_id = space_id
self.status = status
self.type = type
self.uuid = uuid
self.version = version
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.conversation_id is not None:
result['conversationId'] = self.conversation_id
if self.create_time is not None:
result['createTime'] = self.create_time
if self.creator_id is not None:
result['creatorId'] = self.creator_id
if self.extension is not None:
result['extension'] = self.extension
if self.id is not None:
result['id'] = self.id
if self.modified_time is not None:
result['modifiedTime'] = self.modified_time
if self.modifier_id is not None:
result['modifierId'] = self.modifier_id
if self.name is not None:
result['name'] = self.name
if self.parent_id is not None:
result['parentId'] = self.parent_id
if self.path is not None:
result['path'] = self.path
if self.size is not None:
result['size'] = self.size
if self.space_id is not None:
result['spaceId'] = self.space_id
if self.status is not None:
result['status'] = self.status
if self.type is not None:
result['type'] = self.type
if self.uuid is not None:
result['uuid'] = self.uuid
if self.version is not None:
result['version'] = self.version
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('conversationId') is not None:
self.conversation_id = m.get('conversationId')
if m.get('createTime') is not None:
self.create_time = m.get('createTime')
if m.get('creatorId') is not None:
self.creator_id = m.get('creatorId')
if m.get('extension') is not None:
self.extension = m.get('extension')
if m.get('id') is not None:
self.id = m.get('id')
if m.get('modifiedTime') is not None:
self.modified_time = m.get('modifiedTime')
if m.get('modifierId') is not None:
self.modifier_id = m.get('modifierId')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('parentId') is not None:
self.parent_id = m.get('parentId')
if m.get('path') is not None:
self.path = m.get('path')
if m.get('size') is not None:
self.size = m.get('size')
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('type') is not None:
self.type = m.get('type')
if m.get('uuid') is not None:
self.uuid = m.get('uuid')
if m.get('version') is not None:
self.version = m.get('version')
return self
class SendByAppResponseBody(TeaModel):
def __init__(
self,
file: SendByAppResponseBodyFile = None,
):
self.file = file
def validate(self):
if self.file:
self.file.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.file is not None:
result['file'] = self.file.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('file') is not None:
temp_model = SendByAppResponseBodyFile()
self.file = temp_model.from_map(m['file'])
return self
class SendByAppResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
status_code: int = None,
body: SendByAppResponseBody = None,
):
self.headers = headers
self.status_code = status_code
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.status_code, 'status_code')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.status_code is not None:
result['statusCode'] = self.status_code
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('statusCode') is not None:
self.status_code = m.get('statusCode')
if m.get('body') is not None:
temp_model = SendByAppResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class SendLinkHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SendLinkRequest(TeaModel):
def __init__(
self,
dentry_id: str = None,
open_conversation_id: str = None,
space_id: str = None,
union_id: str = None,
):
self.dentry_id = dentry_id
self.open_conversation_id = open_conversation_id
self.space_id = space_id
self.union_id = union_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dentry_id is not None:
result['dentryId'] = self.dentry_id
if self.open_conversation_id is not None:
result['openConversationId'] = self.open_conversation_id
if self.space_id is not None:
result['spaceId'] = self.space_id
if self.union_id is not None:
result['unionId'] = self.union_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('dentryId') is not None:
self.dentry_id = m.get('dentryId')
if m.get('openConversationId') is not None:
self.open_conversation_id = m.get('openConversationId')
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
return self
class SendLinkResponseBodyFile(TeaModel):
def __init__(
self,
conversation_id: str = None,
create_time: str = None,
creator_id: str = None,
extension: str = None,
id: str = None,
modified_time: str = None,
modifier_id: str = None,
name: str = None,
parent_id: str = None,
path: str = None,
size: int = None,
space_id: str = None,
status: str = None,
type: str = None,
uuid: str = None,
version: int = None,
):
self.conversation_id = conversation_id
self.create_time = create_time
self.creator_id = creator_id
self.extension = extension
self.id = id
self.modified_time = modified_time
self.modifier_id = modifier_id
self.name = name
self.parent_id = parent_id
self.path = path
self.size = size
self.space_id = space_id
self.status = status
self.type = type
self.uuid = uuid
self.version = version
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.conversation_id is not None:
result['conversationId'] = self.conversation_id
if self.create_time is not None:
result['createTime'] = self.create_time
if self.creator_id is not None:
result['creatorId'] = self.creator_id
if self.extension is not None:
result['extension'] = self.extension
if self.id is not None:
result['id'] = self.id
if self.modified_time is not None:
result['modifiedTime'] = self.modified_time
if self.modifier_id is not None:
result['modifierId'] = self.modifier_id
if self.name is not None:
result['name'] = self.name
if self.parent_id is not None:
result['parentId'] = self.parent_id
if self.path is not None:
result['path'] = self.path
if self.size is not None:
result['size'] = self.size
if self.space_id is not None:
result['spaceId'] = self.space_id
if self.status is not None:
result['status'] = self.status
if self.type is not None:
result['type'] = self.type
if self.uuid is not None:
result['uuid'] = self.uuid
if self.version is not None:
result['version'] = self.version
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('conversationId') is not None:
self.conversation_id = m.get('conversationId')
if m.get('createTime') is not None:
self.create_time = m.get('createTime')
if m.get('creatorId') is not None:
self.creator_id = m.get('creatorId')
if m.get('extension') is not None:
self.extension = m.get('extension')
if m.get('id') is not None:
self.id = m.get('id')
if m.get('modifiedTime') is not None:
self.modified_time = m.get('modifiedTime')
if m.get('modifierId') is not None:
self.modifier_id = m.get('modifierId')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('parentId') is not None:
self.parent_id = m.get('parentId')
if m.get('path') is not None:
self.path = m.get('path')
if m.get('size') is not None:
self.size = m.get('size')
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('type') is not None:
self.type = m.get('type')
if m.get('uuid') is not None:
self.uuid = m.get('uuid')
if m.get('version') is not None:
self.version = m.get('version')
return self
class SendLinkResponseBody(TeaModel):
def __init__(
self,
file: SendLinkResponseBodyFile = None,
):
self.file = file
def validate(self):
if self.file:
self.file.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.file is not None:
result['file'] = self.file.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('file') is not None:
temp_model = SendLinkResponseBodyFile()
self.file = temp_model.from_map(m['file'])
return self
class SendLinkResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
status_code: int = None,
body: SendLinkResponseBody = None,
):
self.headers = headers
self.status_code = status_code
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.status_code, 'status_code')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.status_code is not None:
result['statusCode'] = self.status_code
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('statusCode') is not None:
self.status_code = m.get('statusCode')
if m.get('body') is not None:
temp_model = SendLinkResponseBody()
self.body = temp_model.from_map(m['body'])
return self
| [
"[email protected]"
] | |
369e5de6978cd855314fe286b88ec95c0f367146 | 19bdbe1c4aa00ba9799764681f16e09f65d6ea2b | /np/lib/smtp.py | 0065154141cf437ba3588749e4b816c5fc03783f | [] | no_license | invisibleroads/networkplanner | b4a3c7b3c0c169c3cd6610a6fb77125434dcb1c4 | 7ad8c0f2b4078f6cca681205e1671d060a937c18 | refs/heads/master | 2023-08-11T17:33:44.458438 | 2012-05-31T13:41:04 | 2012-05-31T13:41:04 | 961,674 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | 'Routines for sending messages'
# Import system modules
import smtplib
import email.message
import email.utils
import socket
def sendMessage(fromByValue, toByValue, subject, body, headerByName=None):
'Send a message using SMTP'
# Prepare
message = email.message.Message()
message.add_header('from', email.utils.formataddr((fromByValue['nickname'], fromByValue['email'])))
message.add_header('to', email.utils.formataddr((toByValue['nickname'], toByValue['email'])))
message.add_header('subject', subject)
message.set_payload(body)
if headerByName:
for key, value in headerByName.iteritems():
message.add_header(key, value)
# Connect to server
if fromByValue['smtp'] == 'localhost':
server = smtplib.SMTP('localhost')
else:
server = smtplib.SMTP_SSL(fromByValue['smtp'], 465)
if len(fromByValue['username']):
server.login(fromByValue['username'], fromByValue['password'])
# Send mail
try:
server.sendmail(fromByValue['email'], toByValue['email'], message.as_string())
    except socket.error as error:
raise SMTPError(error)
finally:
server.quit()
class SMTPError(Exception):
pass
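# Minimal usage sketch; every address, host, and credential below is a
# placeholder, and 'smtp': 'localhost' selects the plain (non-SSL) branch.
if __name__ == '__main__':
    sender = {
        'nickname': 'Network Planner',
        'email': '[email protected]',
        'smtp': 'localhost',
        'username': '',
        'password': '',
    }
    recipient = {'nickname': 'Admin', 'email': '[email protected]'}
    sendMessage(sender, recipient, 'Test subject', 'Test body',
                headerByName={'reply-to': '[email protected]'})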
| [
"[email protected]"
] | |
3fd599f2dd2b120dfc1fa457dd87c9056ade3f26 | 00a9295409b78a53ce790f7ab44931939f42c0e0 | /FPGA/apio/iCEBreaker/FIR_Filter/sympy/venv/lib/python3.8/site-packages/sympy/multipledispatch/utils.py | 11cea683ed08448b11c2efecaea1b7e234934cc4 | [
"Apache-2.0"
] | permissive | klei22/Tech-OnBoarding-Class | c21f0762d2d640d5e9cb124659cded5c865b32d4 | 960e962322c37be9117e0523641f8b582a2beceb | refs/heads/master | 2022-11-10T13:17:39.128342 | 2022-10-25T08:59:48 | 2022-10-25T08:59:48 | 172,292,871 | 2 | 3 | Apache-2.0 | 2019-05-19T00:26:32 | 2019-02-24T03:50:35 | C | UTF-8 | Python | false | false | 3,042 | py | def expand_tuples(L):
"""
>>> from sympy.multipledispatch.utils import expand_tuples
>>> expand_tuples([1, (2, 3)])
[(1, 2), (1, 3)]
>>> expand_tuples([1, 2])
[(1, 2)]
"""
if not L:
return [()]
elif not isinstance(L[0], tuple):
rest = expand_tuples(L[1:])
return [(L[0],) + t for t in rest]
else:
rest = expand_tuples(L[1:])
return [(item,) + t for t in rest for item in L[0]]
# Taken from theano/theano/gof/sched.py
# Avoids licensing issues because this was written by Matthew Rocklin
def _toposort(edges):
""" Topological sort algorithm by Kahn [1] - O(nodes + vertices)
inputs:
edges - a dict of the form {a: {b, c}} where b and c depend on a
outputs:
L - an ordered list of nodes that satisfy the dependencies of edges
>>> from sympy.multipledispatch.utils import _toposort
>>> _toposort({1: (2, 3), 2: (3, )})
[1, 2, 3]
Closely follows the wikipedia page [2]
[1] Kahn, Arthur B. (1962), "Topological sorting of large networks",
Communications of the ACM
[2] https://en.wikipedia.org/wiki/Toposort#Algorithms
"""
incoming_edges = reverse_dict(edges)
incoming_edges = {k: set(val) for k, val in incoming_edges.items()}
S = {v for v in edges if v not in incoming_edges}
L = []
while S:
n = S.pop()
L.append(n)
for m in edges.get(n, ()):
assert n in incoming_edges[m]
incoming_edges[m].remove(n)
if not incoming_edges[m]:
S.add(m)
if any(incoming_edges.get(v, None) for v in edges):
raise ValueError("Input has cycles")
return L
def reverse_dict(d):
"""Reverses direction of dependence dict
>>> d = {'a': (1, 2), 'b': (2, 3), 'c':()}
>>> reverse_dict(d) # doctest: +SKIP
{1: ('a',), 2: ('a', 'b'), 3: ('b',)}
    :note: dict ordering is not deterministic. Because we iterate over the
    input dict, the output of this function depends on that ordering, so
    the output order should be considered nondeterministic.
"""
result = {}
for key in d:
for val in d[key]:
result[val] = result.get(val, tuple()) + (key, )
return result
# Taken from toolz
# Avoids licensing issues because this version was authored by Matthew Rocklin
def groupby(func, seq):
""" Group a collection by a key function
>>> from sympy.multipledispatch.utils import groupby
>>> names = ['Alice', 'Bob', 'Charlie', 'Dan', 'Edith', 'Frank']
>>> groupby(len, names) # doctest: +SKIP
{3: ['Bob', 'Dan'], 5: ['Alice', 'Edith', 'Frank'], 7: ['Charlie']}
>>> iseven = lambda x: x % 2 == 0
>>> groupby(iseven, [1, 2, 3, 4, 5, 6, 7, 8]) # doctest: +SKIP
{False: [1, 3, 5, 7], True: [2, 4, 6, 8]}
See Also:
``countby``
"""
d = dict()
for item in seq:
key = func(item)
if key not in d:
d[key] = list()
d[key].append(item)
return d
| [
"[email protected]"
] | |
e7286393de85a9ea1daeff4f6a590f0d35dd069b | f50f1aa1f8f139d546db3230a1cb1f53043fd9e6 | /hardware/mobile/ifuse/actions.py | 4253ce5b51f00a7f0adaacfac0ba3a76d71890d3 | [] | no_license | pars-linux/corporate2 | 7887961d1552d39bc3b0bef4a60fd3413d9b82bb | 14d1eacfc824fb8d0bff8173e7ac06b36b88d10d | refs/heads/master | 2020-05-26T15:02:12.005654 | 2017-02-27T03:07:14 | 2017-02-27T03:07:14 | 82,476,084 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 557 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2009-2010 TUBITAK/UEKAE
# Licensed under the GNU General Public License, version 2.
# See the file http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
from pisi.actionsapi import autotools
from pisi.actionsapi import pisitools
from pisi.actionsapi import get
def setup():
autotools.autoreconf("-vfi")
autotools.configure()
def build():
autotools.make("-j1")
def install():
autotools.rawInstall("DESTDIR=%s" % get.installDIR())
pisitools.dodoc("AUTHORS", "COPYING", "README")
| [
"[email protected]"
] | |
2da48d4fe2ab88ad57d4bc2ce4b47d37ade84327 | 00c14f5816c3ef6a9ff5652af89c27c12bcf023c | /example/jspm_0_17/jspm_0_17/urls.py | 9ef86b2113613f0783470d90f157872b78c2522d | [
"MIT",
"ISC"
] | permissive | ilyashupta/django-systemjs | 148fd7de73aeb2cf562a07d3bb392436f3a78010 | f4d26794c06449d4d3ae2a6f7ab0bc550b35b0c7 | refs/heads/master | 2023-04-27T14:41:45.265046 | 2016-09-19T09:15:35 | 2016-09-19T09:15:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | """jspm_0_17 URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from django.views.generic import TemplateView
urlpatterns = [
url(r'^admin/', include(admin.site.urls)),
url(r'^$', TemplateView.as_view(template_name='base.html')),
]
| [
"[email protected]"
] | |
936fe33888460fe111915ebee493e4b636140d10 | b42957e496e5c9447b858d7382caea83ce9ea431 | /packtml/__init__.py | d3f0e325a23686c69f7eec073c983de5f0695885 | [
"MIT"
] | permissive | PacktPublishing/Supervised-Machine-Learning-with-Python | 153b9f5248fd4ca79896a277c7f703cf5899ac07 | 00d6ce2451547a73e6358d85937f8cbf2af762a4 | refs/heads/master | 2023-02-02T21:20:35.889344 | 2023-01-30T08:34:13 | 2023-01-30T08:34:13 | 187,639,872 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 658 | py | # -*- coding: utf-8 -*-
import os
# global namespace:
from packtml import clustering
from packtml import decision_tree
from packtml import metrics
from packtml import neural_net
from packtml import recommendation
from packtml import regression
from packtml import utils
# set the version
packtml_location = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(packtml_location, "VERSION")) as vsn:
__version__ = vsn.read().strip()
# remove from global namespace
del os
del packtml_location
del vsn
__all__ = [
'clustering',
'decision_tree',
'metrics',
'neural_net',
'recommendation',
'regression',
'utils'
]
| [
"[email protected]"
] | |
286d4837a392a3730412cc78c44d91c56603e5b6 | dd8227454b817ccf2ceb24b3dfd4260d4ded7a72 | /scripts/item/consume_2434546.py | fd4214fd1a7ed1410ab093ba89cb8ce96fcf7213 | [
"MIT"
] | permissive | Snewmy/swordie | 0dd3c17808b064c2cb2bd9576b51daf01ae5d686 | ae01ed4ec0eb20a18730e8cd209eea0b84a8dd17 | refs/heads/master | 2023-06-30T21:14:05.225798 | 2021-07-06T14:32:39 | 2021-07-06T14:32:39 | 389,497,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | # Scribbler Damage Skin
success = sm.addDamageSkin(2434546)
if success:
sm.chat("The Scribbler Damage Skin has been added to your account's damage skin collection.")
| [
"[email protected]"
] | |
880c458d3f3529d6d643d216ff4c6061674fcd20 | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /test/legacy_test/test_jit_save_load.py | 7f58638e7ac7ad178372817cd669397568a0e4c8 | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 57,829 | py | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pickle
import shutil
import tempfile
import unittest
import numpy as np
import paddle
from paddle import fluid
from paddle.fluid import unique_name
from paddle.jit.api import to_static
from paddle.jit.translated_layer import INFER_PARAMS_INFO_SUFFIX
from paddle.nn import Linear
from paddle.static import InputSpec
BATCH_SIZE = 32
BATCH_NUM = 10
SEED = 10
def random_batch_reader(input_size, label_size):
def _get_random_inputs_and_labels(input_size, label_size):
np.random.seed(SEED)
input = np.random.random(size=input_size).astype('float32')
label = np.random.random(size=label_size).astype('int64')
return input, label
def __reader__():
for _ in range(BATCH_NUM):
batch_input, batch_label = _get_random_inputs_and_labels(
[BATCH_SIZE, input_size], [BATCH_SIZE, label_size]
)
yield batch_input, batch_label
return __reader__
class LinearNet(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
@to_static
def forward(self, x):
return self._linear(x)
class LinearNetWithInputSpec(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
@to_static(input_spec=[InputSpec(shape=[None, 784], dtype='float32')])
def forward(self, x):
return self._linear(x)
class LinearNetNotDeclarative(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
def forward(self, x):
return self._linear(x)
class LinerNetWithLabel(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
@to_static(
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name="image"),
InputSpec(shape=[None, 1], dtype='int64', name="label"),
]
)
def forward(self, x, label):
out = self._linear(x)
loss = paddle.nn.functional.cross_entropy(
out, label, reduction='none', use_softmax=False
)
avg_loss = paddle.mean(loss)
return out, avg_loss
class LinerNetWithPruneInput(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
@to_static(
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name="image"),
InputSpec(shape=[None, 1], dtype='int64', name="label"),
]
)
def forward(self, x, label):
out = self._linear(x)
loss = paddle.nn.functional.cross_entropy(
out, label, reduction='none', use_softmax=False
)
avg_loss = paddle.mean(loss)
return out
class LinerNetWithUselessInput(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
@to_static(
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name="image"),
InputSpec(shape=[None, 1], dtype='int64', name="label"),
]
)
def forward(self, x, label):
out = self._linear(x)
return out
class LinearNetReturnLoss(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
@to_static
def forward(self, x):
y = self._linear(x)
z = self._linear(y)
loss = paddle.mean(z)
return z, loss
class LinearNetMultiInput(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear1 = Linear(in_size, out_size)
self._linear2 = Linear(in_size, out_size)
@to_static(
input_spec=[
InputSpec([None, 8], dtype='float32'),
InputSpec([None, 8], dtype='float32'),
]
)
def forward(self, x, y):
x_out = self._linear1(x)
y_out = self._linear2(y)
loss = paddle.mean(x_out + y_out)
return x_out, y_out, loss
class LinearNetMultiInput1(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear1 = Linear(in_size, out_size)
self._linear2 = Linear(in_size, out_size)
@to_static(
input_spec=(
InputSpec([None, 8], dtype='float32'),
InputSpec([None, 8], dtype='float32'),
)
)
def forward(self, x, y):
x_out = self._linear1(x)
y_out = self._linear2(y)
loss = paddle.mean(x_out + y_out)
return x_out, y_out, loss
class MultiLoadingLinearNet(paddle.nn.Layer):
def __init__(self, size, model_path):
super().__init__()
self._linear = Linear(size, size)
self._load_linear1 = paddle.jit.load(model_path)
self._load_linear2 = paddle.jit.load(model_path)
@to_static
def forward(self, x):
tmp1 = self._linear(x)
tmp2 = self._load_linear1(tmp1)
tmp3 = self._load_linear2(tmp2)
y = self._linear(tmp3)
return y
class LinearNetReturnHidden(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear_1 = Linear(in_size, out_size)
self._linear_2 = Linear(in_size, out_size)
@to_static
def forward(self, x):
y = self._linear_1(x)
z = self._linear_2(y)
loss = paddle.mean(z)
return y, loss
class LinearNetWithNestOut(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear_1 = Linear(in_size, out_size)
self._linear_2 = Linear(in_size, out_size)
@to_static
def forward(self, x):
y = self._linear_1(x)
z = self._linear_2(y)
out = y + z
loss = paddle.mean(out)
return y, [(z, loss), out]
class LinearNetWithDictInput(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
@paddle.jit.to_static(
input_spec=[
{'img': InputSpec(shape=[None, 8], dtype='float32', name='img')},
{'label': InputSpec(shape=[None, 1], dtype='int64', name='label')},
]
)
def forward(self, img, label):
out = self._linear(img['img'])
# not return loss to avoid prune output
loss = paddle.nn.functional.cross_entropy(out, label['label'])
return out
class LinearNetWithDictInputNoPrune(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear = Linear(in_size, out_size)
def forward(self, img):
out = self._linear(img['img'] + img['img2'])
return out
class EmptyLayer(paddle.nn.Layer):
def __init__(self):
super().__init__()
@paddle.jit.to_static
def forward(self, x):
return x
class NoParamLayer(paddle.nn.Layer):
def __init__(self):
super().__init__()
@paddle.jit.to_static
def forward(self, x, y):
return x + y
class LinearNetWithMultiStaticFunc(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self._linear_0 = Linear(in_size, out_size)
self._linear_1 = Linear(in_size, out_size)
self._scale = paddle.to_tensor([9.9])
@paddle.jit.to_static
def forward(self, x):
return self._linear_0(x)
@paddle.jit.to_static
def forward_no_param(self, x):
return x
@paddle.jit.to_static
def forward_general(self, x):
return self._linear_0(x) + self._linear_1(x) * self._scale
def train(layer, input_size=784, label_size=1):
# create optimizer
sgd = fluid.optimizer.SGDOptimizer(
learning_rate=0.01, parameter_list=layer.parameters()
)
# create data loader
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
train_loader.set_batch_generator(
random_batch_reader(input_size, label_size)
)
# train
for data in train_loader():
img, label = data
label.stop_gradient = True
cost = layer(img)
loss = paddle.nn.functional.cross_entropy(
cost, label, reduction='none', use_softmax=False
)
avg_loss = paddle.mean(loss)
avg_loss.backward()
sgd.minimize(avg_loss)
layer.clear_gradients()
return [img], layer, avg_loss
def train_with_label(layer, input_size=784, label_size=1):
# create optimizer
sgd = fluid.optimizer.SGDOptimizer(
learning_rate=0.01, parameter_list=layer.parameters()
)
# create data loader
train_loader = fluid.io.DataLoader.from_generator(capacity=5)
train_loader.set_batch_generator(
random_batch_reader(input_size, label_size)
)
# train
for data in train_loader():
img, label = data
label.stop_gradient = True
out, avg_loss = layer(img, label)
avg_loss.backward()
sgd.minimize(avg_loss)
layer.clear_gradients()
return out
class TestJitSaveLoad(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
self.model_path = os.path.join(
self.temp_dir.name, "test_jit_save_load/model"
)
# enable dygraph mode
fluid.enable_dygraph()
# config seed
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def tearDown(self):
self.temp_dir.cleanup()
def train_and_save_model(self, model_path=None):
layer = LinearNet(784, 1)
example_inputs, layer, _ = train(layer)
final_model_path = model_path if model_path else self.model_path
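        # Record the input types before and after saving to verify that
        # `paddle.jit.save` does not mutate the example inputs in place.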
orig_input_types = [type(x) for x in example_inputs]
paddle.jit.save(
layer=layer, path=final_model_path, input_spec=example_inputs
)
new_input_types = [type(x) for x in example_inputs]
self.assertEqual(orig_input_types, new_input_types)
return layer
def test_save_load(self):
# train and save model
train_layer = self.train_and_save_model()
# load model
loaded_layer = paddle.jit.load(self.model_path)
self.load_and_inference(train_layer, loaded_layer)
self.load_dygraph_state_dict(train_layer)
self.load_and_finetune(train_layer, loaded_layer)
def load_and_inference(self, train_layer, infer_layer):
train_layer.eval()
infer_layer.eval()
# inference & compare
x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32')
)
np.testing.assert_array_equal(
train_layer(x).numpy(), infer_layer(x).numpy()
)
def load_and_finetune(self, train_layer, load_train_layer):
train_layer.train()
load_train_layer.train()
# train & compare
img0, _, train_loss = train(train_layer)
img1, _, load_train_loss = train(load_train_layer)
np.testing.assert_array_equal(
train_loss.numpy(), load_train_loss.numpy()
)
def load_dygraph_state_dict(self, train_layer):
train_layer.eval()
# construct new model
new_layer = LinearNet(784, 1)
orig_state_dict = new_layer.state_dict()
load_state_dict = paddle.load(self.model_path)
for structured_name in orig_state_dict:
self.assertTrue(structured_name in load_state_dict)
new_layer.set_state_dict(load_state_dict)
new_layer.eval()
# inference & compare
x = fluid.dygraph.to_variable(
np.random.random((1, 784)).astype('float32')
)
np.testing.assert_array_equal(
train_layer(x).numpy(), new_layer(x).numpy()
)
def test_load_dygraph_no_path(self):
model_path = os.path.join(
self.temp_dir.name, "test_jit_save_load.no_path/model_path"
)
with self.assertRaises(ValueError):
model_dict = paddle.load(model_path)
def test_jit_load_no_path(self):
path = os.path.join(
self.temp_dir.name, "test_jit_save_load.no_path/model_path"
)
with self.assertRaises(ValueError):
loaded_layer = paddle.jit.load(path)
class TestSaveLoadWithNestOut(unittest.TestCase):
def setUp(self):
# enable dygraph mode
fluid.enable_dygraph()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_nest_output(self):
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
net = LinearNetWithNestOut(8, 8)
dy_outs = paddle.utils.flatten(net(x))
net = to_static(net, input_spec=[InputSpec([None, 8], name='x')])
model_path = os.path.join(self.temp_dir.name, "net_with_nest_out/model")
paddle.jit.save(net, model_path)
load_net = paddle.jit.load(model_path)
load_outs = paddle.utils.flatten(load_net(x))
self.assertTrue(len(dy_outs) == 4)
for dy_out, load_out in zip(dy_outs, load_outs):
np.testing.assert_allclose(
dy_out.numpy(), load_out.numpy(), rtol=1e-05
)
class TestSaveLoadWithDictInput(unittest.TestCase):
def test_dict_input(self):
        # NOTE: This net cannot be executed; it is just a special case
        # for exporting models in model validation.
        # We DO NOT recommend writing a Layer this way.
net = LinearNetWithDictInput(8, 8)
# net.forward.concrete_program.inputs:
# (<__main__.LinearNetWithDictInput object at 0x7f2655298a98>,
# {'img': var img : fluid.VarType.LOD_TENSOR.shape(-1, 8).astype(VarType.FP32)},
# {'label': var label : fluid.VarType.LOD_TENSOR.shape(-1, 1).astype(VarType.INT64)})
self.assertEqual(len(net.forward.concrete_program.inputs), 3)
temp_dir = tempfile.TemporaryDirectory()
path = os.path.join(
temp_dir.name, "test_jit_save_load_with_dict_input/model"
)
# prune inputs
paddle.jit.save(
layer=net,
path=path,
input_spec=[
{'img': InputSpec(shape=[None, 8], dtype='float32', name='img')}
],
)
img = paddle.randn(shape=[4, 8], dtype='float32')
loaded_net = paddle.jit.load(path)
loaded_out = loaded_net(img)
# loaded_net._input_spec():
# [InputSpec(shape=(-1, 8), dtype=VarType.FP32, name=img)]
self.assertEqual(len(loaded_net._input_spec()), 1)
temp_dir.cleanup()
class TestSaveLoadWithDictInputNoPrune(unittest.TestCase):
def test_dict_input(self):
net = LinearNetWithDictInputNoPrune(8, 8)
temp_dir = tempfile.TemporaryDirectory()
path = os.path.join(
temp_dir.name, "test_jit_save_load_with_dict_input_no_prune/model"
)
# prune inputs
paddle.jit.save(
layer=net,
path=path,
input_spec=[
{
'img': InputSpec(
shape=[None, 8], dtype='float32', name='img'
),
'img2': InputSpec(
shape=[None, 8], dtype='float32', name='img2'
),
}
],
)
img = paddle.randn(shape=[4, 8], dtype='float32')
img2 = paddle.randn(shape=[4, 8], dtype='float32')
loaded_net = paddle.jit.load(path)
loaded_out = loaded_net(img, img2)
self.assertEqual(len(loaded_net._input_spec()), 2)
temp_dir.cleanup()
class TestSaveLoadWithInputSpec(unittest.TestCase):
def setUp(self):
# enable dygraph mode
fluid.enable_dygraph()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_with_input_spec(self):
net = LinearNetReturnLoss(8, 8)
# set x.shape = [None, 8]
net.forward = to_static(
net.forward, input_spec=[InputSpec([None, 8], name='x')]
)
model_path = os.path.join(
self.temp_dir.name, "input_spec.output_spec/model"
)
# check inputs and outputs
self.assertTrue(len(net.forward.inputs) == 1)
input_x = net.forward.inputs[0]
self.assertTrue(input_x.shape == (-1, 8))
self.assertTrue(input_x.name == 'x')
# 1. prune loss
output_spec = net.forward.outputs[:1]
paddle.jit.save(net, model_path, output_spec=output_spec)
# 2. load to infer
infer_layer = paddle.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
pred = infer_layer(x)
def test_multi_in_out(self):
net = LinearNetMultiInput(8, 8)
model_path = os.path.join(
self.temp_dir.name, "multi_inout.output_spec1/model"
)
# 1. check inputs and outputs
self.assertTrue(len(net.forward.inputs) == 2)
input_x = net.forward.inputs[0]
input_y = net.forward.inputs[1]
self.assertTrue(input_x.shape == (-1, 8))
self.assertTrue(input_y.shape == (-1, 8))
# 2. prune loss
output_spec = net.forward.outputs[:2]
paddle.jit.save(net, model_path, output_spec=output_spec)
# 3. load to infer
infer_layer = paddle.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
y = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
# 4. predict
pred_x, pred_y = infer_layer(x, y)
# 1. prune y and loss
model_path = os.path.join(
self.temp_dir.name, "multi_inout.output_spec2/model"
)
output_spec = net.forward.outputs[:1]
paddle.jit.save(net, model_path, [input_x], output_spec=output_spec)
# 2. load again
infer_layer2 = paddle.jit.load(model_path)
# 3. predict
pred_xx = infer_layer2(x)
# 4. assert pred_x == pred_xx
np.testing.assert_allclose(pred_x.numpy(), pred_xx.numpy(), rtol=1e-05)
def test_multi_in_out1(self):
net = LinearNetMultiInput1(8, 8)
model_path = os.path.join(
self.temp_dir.name, "multi_inout1.output_spec1/model"
)
# 1. check inputs and outputs
self.assertTrue(len(net.forward.inputs) == 2)
input_x = net.forward.inputs[0]
input_y = net.forward.inputs[1]
self.assertTrue(input_x.shape == (-1, 8))
self.assertTrue(input_y.shape == (-1, 8))
# 2. prune loss
output_spec = net.forward.outputs[:2]
paddle.jit.save(net, model_path, output_spec=output_spec)
# 3. load to infer
infer_layer = paddle.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
y = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
# 4. predict
pred_x, pred_y = infer_layer(x, y)
# 1. prune y and loss
model_path = os.path.join(
self.temp_dir.name, "multi_inout1.output_spec2/model"
)
output_spec = net.forward.outputs[:1]
paddle.jit.save(net, model_path, (input_x,), output_spec=output_spec)
# 2. load again
infer_layer2 = paddle.jit.load(model_path)
# 3. predict
pred_xx = infer_layer2(x)
# 4. assert pred_x == pred_xx
np.testing.assert_allclose(pred_x.numpy(), pred_xx.numpy(), rtol=1e-05)
class TestJitSaveLoadConfig(unittest.TestCase):
def setUp(self):
# enable dygraph mode
fluid.enable_dygraph()
# config seed
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_output_spec(self):
train_layer = LinearNetReturnLoss(8, 8)
adam = fluid.optimizer.AdamOptimizer(
learning_rate=0.1, parameter_list=train_layer.parameters()
)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
for i in range(10):
out, loss = train_layer(x)
loss.backward()
adam.minimize(loss)
train_layer.clear_gradients()
model_path = os.path.join(
self.temp_dir.name, "save_load_config.output_spec"
)
output_spec = [out]
paddle.jit.save(
layer=train_layer,
path=model_path,
input_spec=[x],
output_spec=output_spec,
)
train_layer.eval()
infer_layer = paddle.jit.load(model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
np.testing.assert_array_equal(
train_layer(x)[0].numpy(), infer_layer(x).numpy()
)
def test_save_no_support_config_error(self):
layer = LinearNet(784, 1)
path = os.path.join(self.temp_dir.name, "no_support_config_test")
with self.assertRaises(ValueError):
paddle.jit.save(layer=layer, path=path, model_filename="")
def test_load_empty_model_filename_error(self):
path = os.path.join(self.temp_dir.name, "error_model_filename_test")
with self.assertRaises(ValueError):
paddle.jit.load(path, model_filename="")
def test_load_empty_params_filename_error(self):
path = os.path.join(self.temp_dir.name, "error_params_filename_test")
with self.assertRaises(ValueError):
paddle.jit.load(path, params_filename="")
def test_load_with_no_support_config(self):
path = os.path.join(self.temp_dir.name, "no_support_config_test")
with self.assertRaises(ValueError):
paddle.jit.load(path, separate_params=True)
class TestJitMultipleLoading(unittest.TestCase):
def setUp(self):
self.linear_size = 4
self.temp_dir = tempfile.TemporaryDirectory()
self.model_path = os.path.join(
self.temp_dir.name, "jit_multi_load/model"
)
# enable dygraph mode
fluid.enable_dygraph()
# config seed
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
# train and save base model
self.train_and_save_orig_model()
def tearDown(self):
self.temp_dir.cleanup()
def train_and_save_orig_model(self):
layer = LinearNet(self.linear_size, self.linear_size)
example_inputs, layer, _ = train(layer, self.linear_size, 1)
paddle.jit.save(
layer=layer, path=self.model_path, input_spec=example_inputs
)
def test_load_model_retransform_inference(self):
multi_loaded_layer = MultiLoadingLinearNet(
self.linear_size, self.model_path
)
state_dict = multi_loaded_layer.state_dict()
name_set = set()
for _, var in state_dict.items():
self.assertTrue(var.name not in name_set)
name_set.add(var.name)
class TestJitPruneModelAndLoad(unittest.TestCase):
def setUp(self):
self.linear_size = 4
self.temp_dir = tempfile.TemporaryDirectory()
self.model_path = os.path.join(
self.temp_dir.name, "jit_prune_model_and_load/model"
)
# enable dygraph mode
fluid.enable_dygraph()
# config seed
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
def tearDown(self):
self.temp_dir.cleanup()
def train_and_save(self):
train_layer = LinearNetReturnHidden(8, 8)
adam = fluid.optimizer.AdamOptimizer(
learning_rate=0.1, parameter_list=train_layer.parameters()
)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
for i in range(10):
hidden, loss = train_layer(x)
loss.backward()
adam.minimize(loss)
train_layer.clear_gradients()
output_spec = [hidden]
paddle.jit.save(
layer=train_layer,
path=self.model_path,
input_spec=[x],
output_spec=output_spec,
)
return train_layer
def test_load_pruned_model(self):
train_layer = self.train_and_save()
train_layer.eval()
infer_layer = paddle.jit.load(self.model_path)
x = fluid.dygraph.to_variable(
np.random.random((4, 8)).astype('float32')
)
np.testing.assert_array_equal(
train_layer(x)[0].numpy(), infer_layer(x).numpy()
)
def test_load_var_not_in_extra_var_info(self):
self.train_and_save()
        # change the extra var info
var_info_path = self.model_path + INFER_PARAMS_INFO_SUFFIX
with open(var_info_path, 'rb') as f:
extra_var_info = pickle.load(f)
extra_var_info.clear()
with open(var_info_path, 'wb') as f:
pickle.dump(extra_var_info, f, protocol=2)
with self.assertRaises(RuntimeError):
paddle.jit.load(self.model_path)
class TestJitSaveMultiCases(unittest.TestCase):
def setUp(self):
# enable dygraph mode
fluid.enable_dygraph()
# config seed
paddle.seed(SEED)
paddle.framework.random._manual_program_seed(SEED)
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def verify_inference_correctness(
self, layer, model_path, with_label_and_loss=False, with_label=False
):
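        # Run the in-memory layer and the reloaded layer on the same random
        # input and require exactly identical predictions.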
layer.eval()
loaded_layer = paddle.jit.load(model_path)
loaded_layer.eval()
# inference & compare
x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
if with_label_and_loss:
y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
pred, _ = layer(x, y)
pred = pred.numpy()
elif with_label:
y = paddle.to_tensor(np.random.random((1, 1)).astype('int64'))
pred = layer(x, y)
pred = pred.numpy()
else:
pred = layer(x).numpy()
loaded_pred = loaded_layer(x).numpy()
np.testing.assert_array_equal(
pred,
loaded_pred,
err_msg='Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'.format(
pred, loaded_pred
),
)
def test_no_prune_to_static_after_train(self):
layer = LinearNet(784, 1)
train(layer)
model_path = os.path.join(
self.temp_dir.name, "test_no_prune_to_static_after_train/model"
)
paddle.jit.save(layer, model_path)
self.verify_inference_correctness(layer, model_path)
def test_no_prune_to_static_no_train(self):
layer = LinearNetWithInputSpec(784, 1)
model_path = os.path.join(
self.temp_dir.name, "test_no_prune_to_static_no_train/model"
)
paddle.jit.save(layer, model_path)
self.verify_inference_correctness(layer, model_path)
def test_no_prune_no_to_static_after_train(self):
layer = LinearNetNotDeclarative(784, 1)
train(layer)
model_path = os.path.join(
self.temp_dir.name, "test_no_prune_no_to_static_after_train/model"
)
paddle.jit.save(
layer,
model_path,
input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
)
self.verify_inference_correctness(layer, model_path)
def test_no_prune_no_to_static_after_train_with_examples(self):
layer = LinearNetNotDeclarative(784, 1)
example_inputs, _, _ = train(layer)
model_path = os.path.join(
self.temp_dir.name,
"test_no_prune_no_to_static_after_train_with_examples/model",
)
paddle.jit.save(layer=layer, path=model_path, input_spec=example_inputs)
self.verify_inference_correctness(layer, model_path)
def test_no_prune_no_to_static_no_train(self):
layer = LinearNetNotDeclarative(784, 1)
model_path = os.path.join(
self.temp_dir.name, "test_no_prune_no_to_static_no_train/model"
)
paddle.jit.save(
layer,
model_path,
input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
)
self.verify_inference_correctness(layer, model_path)
def test_prune_to_static_after_train(self):
layer = LinerNetWithLabel(784, 1)
out = train_with_label(layer)
model_path = os.path.join(
self.temp_dir.name, "test_prune_to_static_after_train/model"
)
paddle.jit.save(
layer,
model_path,
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name="image")
],
output_spec=[out],
)
self.verify_inference_correctness(
layer, model_path, with_label_and_loss=True
)
def test_prune_to_static_no_train(self):
layer = LinerNetWithLabel(784, 1)
model_path = os.path.join(
self.temp_dir.name, "test_prune_to_static_no_train/model"
)
        # TODO: without training, we cannot get the output_spec vars here,
        # so for now outputs can only be selected by index
output_spec = layer.forward.outputs[:1]
paddle.jit.save(
layer,
model_path,
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name="image")
],
output_spec=output_spec,
)
self.verify_inference_correctness(
layer, model_path, with_label_and_loss=True
)
def test_prune_input_to_static_no_train(self):
layer = LinerNetWithPruneInput(784, 1)
model_path = os.path.join(
self.temp_dir.name, "test_prune_input_to_static_no_train/model"
)
paddle.jit.save(
layer,
model_path,
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name="image")
],
)
self.verify_inference_correctness(layer, model_path, with_label=True)
def test_prune_useless_input_to_static_no_train(self):
layer = LinerNetWithUselessInput(784, 1)
model_path = os.path.join(
self.temp_dir.name,
"test_prune_useless_input_to_static_no_train/model",
)
paddle.jit.save(
layer,
model_path,
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name="image")
],
)
self.verify_inference_correctness(layer, model_path, with_label=True)
def test_no_prune_input_spec_name_warning(self):
layer = LinearNetWithInputSpec(784, 1)
train(layer)
model_path = os.path.join(
self.temp_dir.name, "test_no_prune_input_spec_name_warning/model"
)
paddle.jit.save(
layer,
model_path,
input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
)
paddle.jit.save(
layer,
model_path,
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name='feed_input')
],
)
self.verify_inference_correctness(layer, model_path)
def test_not_prune_output_spec_name_warning(self):
layer = LinearNet(784, 1)
train(layer)
model_path = os.path.join(
self.temp_dir.name, "test_not_prune_output_spec_name_warning/model"
)
out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
paddle.jit.save(layer, model_path, output_spec=[out])
self.verify_inference_correctness(layer, model_path)
def test_prune_input_spec_name_error(self):
layer = LinerNetWithLabel(784, 1)
model_path = os.path.join(
self.temp_dir.name, "test_prune_input_spec_name_error/model"
)
with self.assertRaises(ValueError):
paddle.jit.save(
layer,
model_path,
input_spec=[InputSpec(shape=[None, 784], dtype='float32')],
)
with self.assertRaises(ValueError):
paddle.jit.save(
layer,
model_path,
input_spec=[
InputSpec(
shape=[None, 784], dtype='float32', name='feed_input'
)
],
)
def test_prune_output_spec_name_error(self):
layer = LinerNetWithLabel(784, 1)
train_with_label(layer)
model_path = os.path.join(
self.temp_dir.name, "test_prune_to_static_after_train/model"
)
out = paddle.to_tensor(np.random.random((1, 1)).astype('float'))
with self.assertRaises(ValueError):
paddle.jit.save(
layer,
model_path,
input_spec=[
InputSpec(shape=[None, 784], dtype='float32', name="image")
],
output_spec=[out],
)
class TestJitSaveLoadEmptyLayer(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
self.model_path = os.path.join(
self.temp_dir.name, "jit_save_load_empty_layer/model"
)
# enable dygraph mode
paddle.disable_static()
def tearDown(self):
self.temp_dir.cleanup()
def test_save_load_empty_layer(self):
layer = EmptyLayer()
x = paddle.to_tensor(np.random.random(10).astype('float32'))
out = layer(x)
paddle.jit.save(layer, self.model_path)
load_layer = paddle.jit.load(self.model_path)
load_out = load_layer(x)
np.testing.assert_array_equal(out, load_out)
class TestJitSaveLoadNoParamLayer(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
self.model_path = os.path.join(
self.temp_dir.name, "jit_save_load_no_param_layer/model"
)
# enable dygraph mode
paddle.disable_static()
def tearDown(self):
self.temp_dir.cleanup()
def test_save_load_no_param_layer(self):
layer = NoParamLayer()
x = paddle.to_tensor(np.random.random(5).astype('float32'))
y = paddle.to_tensor(np.random.random(5).astype('float32'))
out = layer(x, y)
paddle.jit.save(layer, self.model_path)
load_layer = paddle.jit.load(self.model_path)
load_out = load_layer(x, y)
np.testing.assert_array_equal(out, load_out)
class TestJitSaveLoadMultiMethods(unittest.TestCase):
def setUp(self):
# enable dygraph mode
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_save_load_inference(self):
model_path_inference = os.path.join(
self.temp_dir.name, "jit_save_load_multi_methods/model"
)
IMAGE_SIZE = 224
layer = LinearNetWithMultiStaticFunc(IMAGE_SIZE, 10)
inps = paddle.randn([1, IMAGE_SIZE])
result_origin = {}
for func in dir(layer):
if func.startswith('forward'):
result_origin[func] = getattr(layer, func, None)(inps)
paddle.jit.save(layer, model_path_inference)
load_net = paddle.jit.load(model_path_inference)
for func, result in result_origin.items():
self.assertTrue(
float(
(result - getattr(load_net, func, None)(inps)).abs().max()
)
< 1e-5
)
def test_jit_save_load_multi_methods_inputspec(self):
model_path = os.path.join(
self.temp_dir.name, 'jit_save_load_multi_methods/model'
)
layer = LinearNetWithMultiStaticFunc(784, 1)
with self.assertRaises(ValueError):
paddle.jit.save(
layer, model_path, input_spec=[InputSpec(shape=[None, 784])]
)
def test_parse_name(self):
model_path_inference = os.path.join(
self.temp_dir.name, "jit_save_load_parse_name/model"
)
IMAGE_SIZE = 224
layer = LinearNet(IMAGE_SIZE, 1)
inps = paddle.randn([1, IMAGE_SIZE])
layer(inps)
paddle.jit.save(layer, model_path_inference)
paddle.jit.save(layer, model_path_inference + '_v2')
load_net = paddle.jit.load(model_path_inference)
self.assertFalse(hasattr(load_net, 'v2'))
class LayerSaved(paddle.nn.Layer):
def __init__(self, in_size, out_size):
super().__init__()
self.hidden = 100
self._linear_0 = Linear(in_size, self.hidden)
self._linear_1_0 = Linear(self.hidden, self.hidden)
self._linear_1_1 = Linear(self.hidden, self.hidden)
self._linear_2 = Linear(self.hidden, out_size)
self._scale = paddle.to_tensor([9.9])
@paddle.jit.to_static
def forward(self, x):
y = self._linear_0(x)
# Multiple blocks
if paddle.shape(x)[0] == 1:
y = self._linear_1_0(y)
else:
y += self._linear_1_1(y + self._scale)
return self._linear_2(y)
class Net(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.fc1 = paddle.nn.Linear(4, 4)
self.fc2 = paddle.nn.Linear(4, 4)
self.bias = 0.4
self.flag = paddle.ones([2], dtype="int32")
@paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
def log_softmax(self, input):
return paddle.nn.functional.log_softmax(input, axis=-1)
@paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
def forward(self, x):
out = self.fc1(x)
out = paddle.nn.functional.relu(out)
out = paddle.mean(out)
return out
@paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
def infer(self, input):
out = self.fc2(input)
out = out + self.bias
out = paddle.mean(out)
return out
# For extra Python float
@paddle.jit.to_static(property=True)
def fbias(self):
return self.bias + 1
@paddle.jit.to_static(property=True)
def down_sampling(self):
return 4
@paddle.jit.to_static(property=True)
def fstr(self):
return "save str property"
@paddle.jit.to_static(property=True)
def ints(self):
return [10, 20]
@paddle.jit.to_static(property=True)
def floats(self):
return [1.1, 2.2]
@paddle.jit.to_static(property=True)
def strs(self):
return ["hello", "world"]
class NetTensor(paddle.nn.Layer):
def __init__(self):
super().__init__()
self.fc1 = paddle.nn.Linear(4, 4)
self.fc2 = paddle.nn.Linear(4, 4)
self.bias = 0.4
self.flag = paddle.ones([2], dtype="int32")
@paddle.jit.to_static(input_spec=[InputSpec([None, 4], dtype='float32')])
def forward(self, x):
out = self.fc1(x)
out = paddle.nn.functional.relu(out)
out = paddle.mean(out)
return out
@paddle.jit.to_static(property=True)
def fflag(self):
return True
class TestJitSaveCombineProperty(unittest.TestCase):
def setUp(self):
# enable dygraph mode
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_save_combine_property(self):
model_path = os.path.join(
self.temp_dir.name, "test_jit_save_combine/model"
)
# Use new namespace
with unique_name.guard():
net = Net()
# save
paddle.jit.save(net, model_path, combine_params=True)
def test_jit_save_tensor_property(self):
model_path = os.path.join(
self.temp_dir.name, "test_jit_save_combine/model"
)
# Use new namespace
with unique_name.guard():
net = NetTensor()
paddle.jit.save(net, model_path, combine_params=True)
class LayerLoadFinetune(paddle.nn.Layer):
def __init__(self, in_size, out_size, load_path):
super().__init__()
# Test duplicate name
self._linear_0 = Linear(in_size, in_size)
self._linear_1_0 = Linear(out_size, in_size)
self._linear_1_1 = Linear(out_size, in_size)
self._linear_2 = Linear(out_size, out_size)
self._scale = paddle.to_tensor([9.9])
# Load multiple times
self._load_l1 = paddle.jit.load(load_path)
self._load_l2 = paddle.jit.load(load_path)
@paddle.jit.to_static
def forward(self, x):
y = self._linear_0(x)
y = self._load_l1(y)
# Multiple blocks
if paddle.shape(x)[0] == 1:
y = self._linear_1_0(y)
y = self._load_l1(y)
else:
y += self._linear_1_1(x + self._scale)
y = self._load_l2(y)
y = self._linear_1_0(y)
y = self._load_l1(y)
y = self._linear_1_0(y)
# Use the same layer multiple times.
y = self._load_l1(y)
return y
class TestJitSaveLoadSaveWithoutRunning(unittest.TestCase):
def setUp(self):
# enable dygraph mode
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_save_load_finetune_load(self):
model_path = os.path.join(
self.temp_dir.name, "test_jit_save_load_save_without_running/model"
)
IMAGE_SIZE = 224
inps0 = paddle.randn([1, IMAGE_SIZE])
inps1 = paddle.randn([2, IMAGE_SIZE])
# Use new namespace
with unique_name.guard():
layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE)
# save
paddle.jit.save(
layer_save,
model_path,
input_spec=[
paddle.static.InputSpec(
shape=[None, IMAGE_SIZE], dtype='float32'
)
],
)
result_00 = layer_save(inps0)
result_01 = layer_save(inps1)
# load and save without running
with unique_name.guard():
layer_load = paddle.jit.load(model_path)
paddle.jit.save(
layer_load,
model_path,
input_spec=[
paddle.static.InputSpec(
shape=[None, IMAGE_SIZE], dtype='float32'
)
],
)
# reload
layer_reload = paddle.jit.load(model_path)
result_10 = layer_reload(inps0)
result_11 = layer_reload(inps1)
self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)
class TestJitSaveLoadFinetuneLoad(unittest.TestCase):
def setUp(self):
# enable dygraph mode
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_save_load_finetune_load(self):
model_path = os.path.join(
self.temp_dir.name, "test_jit_save_load_finetune_load/model"
)
IMAGE_SIZE = 224
inps0 = paddle.randn([1, IMAGE_SIZE])
inps1 = paddle.randn([2, IMAGE_SIZE])
# Use new namespace
with unique_name.guard():
layer_save = LayerSaved(IMAGE_SIZE, IMAGE_SIZE)
layer_save(inps0)
# save
paddle.jit.save(layer_save, model_path)
# load
with unique_name.guard():
layer_load = LayerLoadFinetune(IMAGE_SIZE, IMAGE_SIZE, model_path)
# train
train(layer_load, input_size=IMAGE_SIZE)
result_00 = layer_load(inps0)
result_01 = layer_load(inps1)
# save
paddle.jit.save(layer_load, model_path)
# load
layer_finetune = paddle.jit.load(model_path)
result_10 = layer_finetune(inps0)
result_11 = layer_finetune(inps1)
self.assertTrue(float((result_00 - result_10).abs().max()) < 1e-5)
self.assertTrue(float((result_01 - result_11).abs().max()) < 1e-5)
# NOTE(weixin): When there are multiple test functions in a single
# `unittest.TestCase`, the functions can affect each other and there is
# a risk of random failure.
# So the tests are divided into three TestCases: TestJitSaveLoadFunctionCase1,
# TestJitSaveLoadFunctionCase2 and TestJitSaveLoadFunctionCase3.
class TestJitSaveLoadFunctionCase1(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_save_load_static_function(self):
@paddle.jit.to_static
def fun(inputs):
return paddle.tanh(inputs)
path = os.path.join(
self.temp_dir.name, 'test_jit_save_load_function_1/func'
)
inps = paddle.rand([3, 6])
origin = fun(inps)
paddle.jit.save(fun, path)
load_func = paddle.jit.load(path)
load_result = load_func(inps)
self.assertTrue((load_result - origin).abs().max() < 1e-10)
class TestJitSaveLoadFunctionCase2(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_save_load_function_input_spec(self):
@paddle.jit.to_static(
input_spec=[
InputSpec(shape=[None, 6], dtype='float32', name='x'),
]
)
def fun(inputs):
return paddle.nn.functional.relu(inputs)
path = os.path.join(
self.temp_dir.name, 'test_jit_save_load_function_2/func'
)
inps = paddle.rand([3, 6])
origin = fun(inps)
paddle.jit.save(fun, path)
load_func = paddle.jit.load(path)
load_result = load_func(inps)
self.assertTrue((load_result - origin).abs().max() < 1e-10)
class TestJitSaveLoadFunctionCase3(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_save_load_function_function(self):
def fun(inputs):
return paddle.tanh(inputs)
path = os.path.join(
self.temp_dir.name, 'test_jit_save_load_function_3/func'
)
inps = paddle.rand([3, 6])
origin = fun(inps)
paddle.jit.save(
fun,
path,
input_spec=[
InputSpec(shape=[None, 6], dtype='float32', name='x'),
],
)
load_func = paddle.jit.load(path)
load_result = load_func(inps)
self.assertTrue((load_result - origin).abs().max() < 1e-10)
class TestJitSaveLoadFunctionWithParamCase1(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_save_load_function(self):
class LinearNet(paddle.nn.Layer):
def __init__(self):
super().__init__()
self._linear = paddle.nn.Linear(5, 6)
def forward(self, x):
return paddle.tanh(x)
            def another_forward(self, x):
return self._linear(x)
layer = LinearNet()
inps = paddle.rand([3, 5])
        origin = layer.another_forward(inps)
func = paddle.jit.to_static(
            layer.another_forward, [paddle.static.InputSpec(shape=[-1, 5])]
)
path = os.path.join(
self.temp_dir.name,
'test_jit_save_load_function_with_params_case1/func',
)
paddle.jit.save(func, path)
load_func = paddle.jit.load(path)
load_result = load_func(inps)
np.testing.assert_array_equal(load_result.numpy(), origin.numpy())
class TestJitSaveLoadFunctionWithParamCase2(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_save_load_function(self):
class LinearNet(paddle.nn.Layer):
def __init__(self):
super().__init__()
self._linear = paddle.nn.Linear(5, 6)
def forward(self, x):
return paddle.tanh(x)
@paddle.jit.to_static(input_spec=[InputSpec(shape=[-1, 5])])
            def another_forward(self, x):
return self._linear(x)
layer = LinearNet()
inps = paddle.rand([3, 5])
path = os.path.join(
self.temp_dir.name,
'test_jit_save_load_function_with_params_case2/func',
)
        paddle.jit.save(layer.another_forward, path)
        origin_result = layer.another_forward(inps)
load_func = paddle.jit.load(path)
load_result = load_func(inps)
np.testing.assert_array_equal(
origin_result.numpy(), load_result.numpy()
)
class TestJitSaveLoadFunctionWithParamCase3(unittest.TestCase):
def setUp(self):
paddle.disable_static()
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_save_load_function(self):
class LinearNet(paddle.nn.Layer):
def __init__(self):
super().__init__()
self._linear = paddle.nn.Linear(5, 6)
def forward(self, x):
return paddle.tanh(x)
@paddle.jit.to_static
            def another_forward(self, x):
return self._linear(x)
layer = LinearNet()
inps = paddle.rand([3, 5])
        origin = layer.another_forward(inps)
path = os.path.join(
self.temp_dir.name,
'test_jit_save_load_function_with_params_case3/func',
)
        paddle.jit.save(layer.another_forward, path)
load_func = paddle.jit.load(path)
load_result = load_func(inps)
np.testing.assert_array_equal(load_result.numpy(), origin.numpy())
class TestJitSaveLoadDataParallel(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def verify_inference_correctness(self, layer, path):
layer.eval()
loaded_layer = paddle.jit.load(path)
loaded_layer.eval()
# inference & compare
x = paddle.to_tensor(np.random.random((1, 784)).astype('float32'))
pred = layer(x).numpy()
loaded_pred = loaded_layer(x).numpy()
np.testing.assert_array_equal(
pred,
loaded_pred,
err_msg='Result diff when load and inference:\nlayer result:\n{}\nloaded layer result:\n{}'.format(
pred, loaded_pred
),
)
def test_jit_save_data_parallel_with_inputspec(self):
layer = LinearNetNotDeclarative(784, 1)
layer = paddle.DataParallel(layer)
path = os.path.join(
self.temp_dir.name, "jit_save_data_parallel_with_inputspec/model"
)
paddle.jit.save(
layer=layer, path=path, input_spec=[InputSpec(shape=[None, 784])]
)
self.verify_inference_correctness(layer, path)
def test_jit_save_data_parallel_with_to_static(self):
layer = LinearNetWithInputSpec(784, 1)
layer = paddle.DataParallel(layer)
path = os.path.join(
self.temp_dir.name, "jit_save_data_parallel_with_to_static/model"
)
paddle.jit.save(layer, path)
self.verify_inference_correctness(layer, path)
class InputSpecLayer(paddle.nn.Layer):
'''
A layer with InputSpec to test InputSpec compatibility
'''
@paddle.jit.to_static(
input_spec=[
InputSpec(shape=[None, 8], dtype='float32', name='x'),
InputSpec(shape=[None, 1], dtype='float64', name='y'),
]
)
def forward(self, x, y):
return x, y
class TestInputSpecCompatibility(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def _assert_input_spec_layer_return(self, expect_layer, test_layer):
input_x = paddle.uniform([8, 8], dtype='float32')
input_y = paddle.uniform([8, 1], dtype='float64')
expected_result = expect_layer(input_x, input_y)
test_result = test_layer(input_x, input_y)
np.testing.assert_allclose(
expected_result[0].numpy(), test_result[0].numpy()
)
np.testing.assert_allclose(
expected_result[1].numpy(), test_result[1].numpy()
)
    def test_jit_save_compatible_input_spec(self):
        layer = InputSpecLayer()
save_dir = os.path.join(
self.temp_dir.name, "jit_save_compatible_input_spec"
)
path = save_dir + "/model"
paddle.jit.save(layer=layer, path=path)
no_input_spec_layer = paddle.jit.load(path)
self._assert_input_spec_layer_return(layer, no_input_spec_layer)
shutil.rmtree(save_dir)
paddle.jit.save(
layer=layer,
path=path,
input_spec=[
InputSpec(shape=[None, 8], dtype='float32', name='x'),
InputSpec(shape=[None, 1], dtype='float64', name='y'),
],
)
same_input_spec_layer = paddle.jit.load(path)
self._assert_input_spec_layer_return(layer, same_input_spec_layer)
shutil.rmtree(save_dir)
paddle.jit.save(
layer=layer,
path=path,
input_spec=[
InputSpec(shape=[8, 8], dtype='float32'),
InputSpec(shape=[8, -1], dtype='float64'),
],
)
compatible_input_spec_layer = paddle.jit.load(path)
self._assert_input_spec_layer_return(layer, compatible_input_spec_layer)
shutil.rmtree(save_dir)
    def test_jit_save_incompatible_input_spec(self):
        layer = InputSpecLayer()
save_dir = os.path.join(
self.temp_dir.name, "jit_save_compatible_input_spec"
)
path = save_dir + "/model"
with self.assertRaises(ValueError):
# type mismatch
paddle.jit.save(
layer=layer,
path=path,
input_spec=[
InputSpec(shape=[None, 8], dtype='float64'),
InputSpec(shape=[None, 1], dtype='float64'),
],
)
with self.assertRaises(ValueError):
# shape len mismatch
paddle.jit.save(
layer=layer,
path=path,
input_spec=[
InputSpec(shape=[None, 8, 1], dtype='float32'),
InputSpec(shape=[None, 1], dtype='float64'),
],
)
with self.assertRaises(ValueError):
# shape mismatch
paddle.jit.save(
layer=layer,
path=path,
input_spec=[
InputSpec(shape=[None, 8], dtype='float32'),
InputSpec(shape=[None, 2], dtype='float64'),
],
)
if os.path.exists(save_dir):
shutil.rmtree(save_dir)
class NotJitForward(paddle.nn.Layer):
def __init__(self):
super().__init__()
def forward(self, x, y):
return x + y
class TestNotJitForward(unittest.TestCase):
def setUp(self):
self.temp_dir = tempfile.TemporaryDirectory()
def tearDown(self):
self.temp_dir.cleanup()
def test_jit_not_save_forward(self):
layer = NotJitForward()
save_dir = os.path.join(self.temp_dir.name, "jit_not_save_forward")
path = save_dir + "/model"
paddle.jit.save(layer=layer, path=path, skip_forward=True)
self.assertTrue(not os.path.exists(path + ".pdmodel"))
self.assertTrue(not os.path.exists(path + ".pdparam"))
with self.assertRaises(ValueError):
paddle.jit.load(path=path)
shutil.rmtree(save_dir)
if __name__ == '__main__':
unittest.main()
| [
"[email protected]"
] | |
0fdb86c8d95ec238f669e4cd793c8b90cee446f0 | b48a1d5733d10c39a112698c2286ae1afb02f36a | /announce/management/commands/migrate_mailchimp_users.py | 7cce1d4f3b7e6e48acb8b65b386b435c2095820c | [
"MIT"
] | permissive | p2pu/learning-circles | ecb317aaa8620cb076ce45c42d055e89e6586516 | ae8de4df48aae0844fb50dca5c62c099b3b2b0a3 | refs/heads/master | 2023-08-19T19:18:09.198077 | 2023-08-10T09:23:58 | 2023-08-10T09:23:58 | 32,735,768 | 11 | 10 | MIT | 2023-08-10T09:30:04 | 2015-03-23T14:05:41 | Python | UTF-8 | Python | false | false | 1,191 | py | from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth.models import User
from announce.mailchimp import archive_members, list_members, batch_subscribe
from studygroups.models import Profile
import requests
import logging
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Synchronize mailchimp audience with users that opted in for communications'
def handle(self, *args, **options):
# get all mailchimp users
mailchimp_members = list_members()
filter_subscribed = lambda x: x.get('status') not in ['unsubscribed', 'cleaned']
mailchimp_members = filter(filter_subscribed, mailchimp_members)
emails = [member.get('email_address').lower() for member in mailchimp_members]
        # add all members with communication_opt_in == True to mailchimp
subscribed = User.objects.filter(profile__communication_opt_in=True, is_active=True, profile__email_confirmed_at__isnull=False)
to_sub = list(filter(lambda u: u.email.lower() not in emails, subscribed))
print('{} users will be added to the mailchimp list'.format(len(to_sub)))
batch_subscribe(to_sub)
| [
"[email protected]"
] | |
d1660437d7cc1d437db44a397725e49216966700 | eefc47dcb8377239c34134024be8783a9e3b5f44 | /bimdata_api_client/models/raw_system.py | 3d6644047f00fe509b01b9df9dfbe5ddcdf9b50d | [] | no_license | Mike-FR/python-api-client | 4fea5afcd942ebdf6dca174e2d38afaeed71eee4 | 54b2b090cbbf127cf8ac0f17c3492e6d0e1c7f29 | refs/heads/master | 2023-06-29T13:07:30.438434 | 2021-07-28T09:08:54 | 2021-07-28T09:08:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,617 | py | # coding: utf-8
"""
BIMData API
    BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects and clouds, upload your IFC files, and manage them through dedicated endpoints.  # noqa: E501
The version of the OpenAPI document: v1
Contact: [email protected]
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from bimdata_api_client.configuration import Configuration
class RawSystem(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'uuid': 'str',
'name': 'str',
'description': 'str',
'object_type': 'str'
}
attribute_map = {
'uuid': 'uuid',
'name': 'name',
'description': 'description',
'object_type': 'object_type'
}
def __init__(self, uuid=None, name=None, description=None, object_type=None, local_vars_configuration=None): # noqa: E501
"""RawSystem - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._uuid = None
self._name = None
self._description = None
self._object_type = None
self.discriminator = None
self.uuid = uuid
self.name = name
self.description = description
self.object_type = object_type
@property
def uuid(self):
"""Gets the uuid of this RawSystem. # noqa: E501
:return: The uuid of this RawSystem. # noqa: E501
:rtype: str
"""
return self._uuid
@uuid.setter
def uuid(self, uuid):
"""Sets the uuid of this RawSystem.
:param uuid: The uuid of this RawSystem. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and uuid is None: # noqa: E501
raise ValueError("Invalid value for `uuid`, must not be `None`") # noqa: E501
if (self.local_vars_configuration.client_side_validation and
uuid is not None and len(uuid) < 1):
raise ValueError("Invalid value for `uuid`, length must be greater than or equal to `1`") # noqa: E501
self._uuid = uuid
@property
def name(self):
"""Gets the name of this RawSystem. # noqa: E501
:return: The name of this RawSystem. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this RawSystem.
:param name: The name of this RawSystem. # noqa: E501
:type: str
"""
self._name = name
@property
def description(self):
"""Gets the description of this RawSystem. # noqa: E501
:return: The description of this RawSystem. # noqa: E501
:rtype: str
"""
return self._description
@description.setter
def description(self, description):
"""Sets the description of this RawSystem.
:param description: The description of this RawSystem. # noqa: E501
:type: str
"""
self._description = description
@property
def object_type(self):
"""Gets the object_type of this RawSystem. # noqa: E501
:return: The object_type of this RawSystem. # noqa: E501
:rtype: str
"""
return self._object_type
@object_type.setter
def object_type(self, object_type):
"""Sets the object_type of this RawSystem.
:param object_type: The object_type of this RawSystem. # noqa: E501
:type: str
"""
self._object_type = object_type
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, RawSystem):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, RawSystem):
return True
return self.to_dict() != other.to_dict()
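if __name__ == "__main__":
    # Minimal usage sketch (illustrative only): the field values below are
    # made-up placeholders, not real BIMData identifiers.
    system = RawSystem(
        uuid="3fa85f64-5717-4562-b3fc-2c963f66afa6",
        name="HVAC",
        description="Heating and ventilation system",
        object_type="IfcSystem",
    )
    print(system.to_str())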
| [
"[email protected]"
] | |
5afae695f4d0d8c66f3a8d64f55c514f3919824c | a00ed711e3e08b50ad6e91cc07a2cddc4a1de5ea | /airflow/providers/microsoft/azure/operators/data_factory.py | 488ccbced070222c2fa5c3d5046514b7ee751015 | [
"Apache-2.0",
"BSD-3-Clause",
"MIT"
] | permissive | ishiis/airflow | 4305794e36b611d01f49e3f2401be3dc49782670 | 292440d54f4db84aaf0c5a98cf5fcf34303f2fa8 | refs/heads/master | 2022-07-30T00:51:28.806940 | 2022-07-14T12:07:11 | 2022-07-14T12:07:11 | 209,801,072 | 1 | 0 | Apache-2.0 | 2019-09-20T13:47:26 | 2019-09-20T13:47:26 | null | UTF-8 | Python | false | false | 9,684 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from typing import TYPE_CHECKING, Any, Dict, Optional, Sequence
from airflow.hooks.base import BaseHook
from airflow.models import BaseOperator, BaseOperatorLink, XCom
from airflow.providers.microsoft.azure.hooks.data_factory import (
AzureDataFactoryHook,
AzureDataFactoryPipelineRunException,
AzureDataFactoryPipelineRunStatus,
)
if TYPE_CHECKING:
from airflow.models.taskinstance import TaskInstanceKey
from airflow.utils.context import Context
class AzureDataFactoryPipelineRunLink(BaseOperatorLink):
"""Constructs a link to monitor a pipeline run in Azure Data Factory."""
name = "Monitor Pipeline Run"
def get_link(
self,
operator,
dttm=None,
*,
ti_key: Optional["TaskInstanceKey"] = None,
) -> str:
if ti_key is not None:
run_id = XCom.get_value(key="run_id", ti_key=ti_key)
else:
assert dttm
run_id = XCom.get_one(
key="run_id",
dag_id=operator.dag.dag_id,
task_id=operator.task_id,
execution_date=dttm,
)
conn = BaseHook.get_connection(operator.azure_data_factory_conn_id)
subscription_id = conn.extra_dejson["extra__azure_data_factory__subscriptionId"]
# Both Resource Group Name and Factory Name can either be declared in the Azure Data Factory
# connection or passed directly to the operator.
resource_group_name = operator.resource_group_name or conn.extra_dejson.get(
"extra__azure_data_factory__resource_group_name"
)
factory_name = operator.factory_name or conn.extra_dejson.get(
"extra__azure_data_factory__factory_name"
)
url = (
f"https://adf.azure.com/en-us/monitoring/pipelineruns/{run_id}"
f"?factory=/subscriptions/{subscription_id}/"
f"resourceGroups/{resource_group_name}/providers/Microsoft.DataFactory/"
f"factories/{factory_name}"
)
return url
class AzureDataFactoryRunPipelineOperator(BaseOperator):
"""
Executes a data factory pipeline.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:AzureDataFactoryRunPipelineOperator`
:param azure_data_factory_conn_id: The connection identifier for connecting to Azure Data Factory.
:param pipeline_name: The name of the pipeline to execute.
:param wait_for_termination: Flag to wait on a pipeline run's termination. By default, this feature is
enabled but could be disabled to perform an asynchronous wait for a long-running pipeline execution
using the ``AzureDataFactoryPipelineRunSensor``.
:param resource_group_name: The resource group name. If a value is not passed in to the operator, the
``AzureDataFactoryHook`` will attempt to use the resource group name provided in the corresponding
connection.
:param factory_name: The data factory name. If a value is not passed in to the operator, the
        ``AzureDataFactoryHook`` will attempt to use the factory name provided in the corresponding
connection.
:param reference_pipeline_run_id: The pipeline run identifier. If this run ID is specified the parameters
of the specified run will be used to create a new run.
:param is_recovery: Recovery mode flag. If recovery mode is set to `True`, the specified referenced
pipeline run and the new run will be grouped under the same ``groupId``.
:param start_activity_name: In recovery mode, the rerun will start from this activity. If not specified,
all activities will run.
:param start_from_failure: In recovery mode, if set to true, the rerun will start from failed activities.
The property will be used only if ``start_activity_name`` is not specified.
:param parameters: Parameters of the pipeline run. These parameters are referenced in a pipeline via
``@pipeline().parameters.parameterName`` and will be used only if the ``reference_pipeline_run_id`` is
not specified.
:param timeout: Time in seconds to wait for a pipeline to reach a terminal status for non-asynchronous
waits. Used only if ``wait_for_termination`` is True.
:param check_interval: Time in seconds to check on a pipeline run's status for non-asynchronous waits.
Used only if ``wait_for_termination`` is True.
"""
template_fields: Sequence[str] = (
"azure_data_factory_conn_id",
"resource_group_name",
"factory_name",
"pipeline_name",
"reference_pipeline_run_id",
"parameters",
)
template_fields_renderers = {"parameters": "json"}
ui_color = "#0678d4"
operator_extra_links = (AzureDataFactoryPipelineRunLink(),)
def __init__(
self,
*,
pipeline_name: str,
azure_data_factory_conn_id: str = AzureDataFactoryHook.default_conn_name,
wait_for_termination: bool = True,
resource_group_name: Optional[str] = None,
factory_name: Optional[str] = None,
reference_pipeline_run_id: Optional[str] = None,
is_recovery: Optional[bool] = None,
start_activity_name: Optional[str] = None,
start_from_failure: Optional[bool] = None,
parameters: Optional[Dict[str, Any]] = None,
timeout: int = 60 * 60 * 24 * 7,
check_interval: int = 60,
**kwargs,
) -> None:
super().__init__(**kwargs)
self.azure_data_factory_conn_id = azure_data_factory_conn_id
self.pipeline_name = pipeline_name
self.wait_for_termination = wait_for_termination
self.resource_group_name = resource_group_name
self.factory_name = factory_name
self.reference_pipeline_run_id = reference_pipeline_run_id
self.is_recovery = is_recovery
self.start_activity_name = start_activity_name
self.start_from_failure = start_from_failure
self.parameters = parameters
self.timeout = timeout
self.check_interval = check_interval
def execute(self, context: "Context") -> None:
self.hook = AzureDataFactoryHook(azure_data_factory_conn_id=self.azure_data_factory_conn_id)
self.log.info("Executing the %s pipeline.", self.pipeline_name)
response = self.hook.run_pipeline(
pipeline_name=self.pipeline_name,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
reference_pipeline_run_id=self.reference_pipeline_run_id,
is_recovery=self.is_recovery,
start_activity_name=self.start_activity_name,
start_from_failure=self.start_from_failure,
parameters=self.parameters,
)
self.run_id = vars(response)["run_id"]
# Push the ``run_id`` value to XCom regardless of what happens during execution. This allows for
# retrieval the executed pipeline's ``run_id`` for downstream tasks especially if performing an
# asynchronous wait.
context["ti"].xcom_push(key="run_id", value=self.run_id)
if self.wait_for_termination:
self.log.info("Waiting for pipeline run %s to terminate.", self.run_id)
if self.hook.wait_for_pipeline_run_status(
run_id=self.run_id,
expected_statuses=AzureDataFactoryPipelineRunStatus.SUCCEEDED,
check_interval=self.check_interval,
timeout=self.timeout,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
):
self.log.info("Pipeline run %s has completed successfully.", self.run_id)
else:
raise AzureDataFactoryPipelineRunException(
f"Pipeline run {self.run_id} has failed or has been cancelled."
)
def on_kill(self) -> None:
if self.run_id:
self.hook.cancel_pipeline_run(
run_id=self.run_id,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
)
# Check to ensure the pipeline run was cancelled as expected.
if self.hook.wait_for_pipeline_run_status(
run_id=self.run_id,
expected_statuses=AzureDataFactoryPipelineRunStatus.CANCELLED,
check_interval=self.check_interval,
timeout=self.timeout,
resource_group_name=self.resource_group_name,
factory_name=self.factory_name,
):
self.log.info("Pipeline run %s has been cancelled successfully.", self.run_id)
else:
raise AzureDataFactoryPipelineRunException(f"Pipeline run {self.run_id} was not cancelled.")
| [
"[email protected]"
] | |
92636880ee00dcdaf6082a42f6967c44fa8b6054 | 9bcd8a8de7e34ab52f44070c171e2e12e52e9775 | /setup.py | c7ab57d1d127894b45df406d8c76bdb98355363e | [
"BSD-2-Clause"
] | permissive | miracle2k/localtodo | c419bf5cd8aa5fd6092420577c6155a3d418cd1d | 8598a073d9fe466832b6a952a0b1dc20603d0e7d | refs/heads/master | 2022-04-30T13:36:50.211348 | 2022-03-21T18:45:16 | 2022-03-21T18:45:16 | 5,198,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 754 | py | #!/usr/bin/env python
# coding: utf-8
from setuptools import setup
setup(
name='localtodo',
url='https://github.com/miracle2k/localtodo',
version='1.0',
license='BSD',
author=u'Michael Elsdörfer',
author_email='[email protected]',
description=
'.gitignore local todo files, but sync them through Dropbox.',
py_modules=['localtodo'],
install_requires=['docopt==0.4.1'],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python'
],
entry_points="""[console_scripts]\nlocaltodo = localtodo:run\n""",
) | [
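# Usage sketch (assumed, not part of the original file): install with
# `pip install .`; the entry point above then exposes a `localtodo`
# console script.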
"[email protected]"
] | |
805535843094f8434fa9cfb5c22c4c9c99ef2185 | 2c2d2405929b026ac4de77d34538cec623dee5eb | /codes/SRN/models/modules/loss.py | 844e09818490d48d1b9b375b12a65032b32c4075 | [] | no_license | greitzmann/DASR | 9d709cf031561897722f1553842af05fca36855e | f85b22ada54344fd0d94ba31ae596427cb9b5c5b | refs/heads/master | 2023-01-01T12:26:11.563140 | 2020-10-15T16:03:26 | 2020-10-15T16:03:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,636 | py | import random
import torch
import torch.nn as nn
import sys
from PerceptualSimilarity.models import util as ps
# Define GAN loss: [vanilla | lsgan | wgan-gp]
class GANLoss(nn.Module):
def __init__(self, gan_type, real_label_val=1.0, fake_label_val=0.0):
super(GANLoss, self).__init__()
self.gan_type = gan_type.lower()
self.real_label_val = real_label_val
self.fake_label_val = fake_label_val
if self.gan_type == 'vanilla':
self.loss = nn.BCEWithLogitsLoss()
elif self.gan_type == 'lsgan':
self.loss = nn.MSELoss()
elif self.gan_type == 'wgan-gp':
def wgan_loss(input, target):
# target is boolean
return -1 * input.mean() if target else input.mean()
self.loss = wgan_loss
else:
raise NotImplementedError('GAN type [{:s}] is not found'.format(self.gan_type))
def get_target_label(self, input, target_is_real):
if self.gan_type == 'wgan-gp':
return target_is_real
if target_is_real:
return torch.empty_like(input).fill_(self.real_label_val)
else:
return torch.empty_like(input).fill_(self.fake_label_val)
def forward(self, input, target_is_real):
target_label = self.get_target_label(input, target_is_real)
loss = self.loss(input, target_label)
return loss
class GradientPenaltyLoss(nn.Module):
def __init__(self, device=torch.device('cpu')):
super(GradientPenaltyLoss, self).__init__()
self.register_buffer('grad_outputs', torch.Tensor())
self.grad_outputs = self.grad_outputs.to(device)
def get_grad_outputs(self, input):
if self.grad_outputs.size() != input.size():
self.grad_outputs.resize_(input.size()).fill_(1.0)
return self.grad_outputs
def forward(self, interp, interp_crit):
grad_outputs = self.get_grad_outputs(interp_crit)
grad_interp = torch.autograd.grad(outputs=interp_crit, inputs=interp, \
grad_outputs=grad_outputs, create_graph=True, retain_graph=True, only_inputs=True)[0]
grad_interp = grad_interp.view(grad_interp.size(0), -1)
grad_interp_norm = grad_interp.norm(2, dim=1)
loss = ((grad_interp_norm - 1)**2).mean()
return loss
class PerceptualLossLPIPS(nn.Module):
def __init__(self):
super(PerceptualLossLPIPS, self).__init__()
self.loss_network = ps.PerceptualLoss(use_gpu=torch.cuda.is_available())
def forward(self, x, y):
return self.loss_network.forward(x, y, normalize=True).mean()
class PerceptualLoss(nn.Module):
def __init__(self, rotations=False, flips=False):
super(PerceptualLoss, self).__init__()
self.loss = PerceptualLossLPIPS()
self.rotations = rotations
self.flips = flips
def forward(self, x, y):
if self.rotations:
k_rot = random.choice([-1, 0, 1])
x = torch.rot90(x, k_rot, [2, 3])
y = torch.rot90(y, k_rot, [2, 3])
if self.flips:
if random.choice([True, False]):
x = torch.flip(x, (2,))
y = torch.flip(y, (2,))
if random.choice([True, False]):
x = torch.flip(x, (3,))
y = torch.flip(y, (3,))
return self.loss(x, y)
def generator_loss(labels, wasserstein=False, weights=None):
if not isinstance(labels, list):
labels = (labels,)
if weights is None:
weights = [1.0 / len(labels)] * len(labels)
loss = 0.0
for label, weight in zip(labels, weights):
if wasserstein:
loss += weight * torch.mean(-label)
else:
loss += weight * torch.mean(-torch.log(label + 1e-8))
return loss
def discriminator_loss(reals, fakes, wasserstein=False, grad_penalties=None, weights=None):
if not isinstance(reals, list):
reals = (reals,)
if not isinstance(fakes, list):
fakes = (fakes,)
if weights is None:
weights = [1.0 / len(fakes)] * len(fakes)
loss = 0.0
if wasserstein:
if not isinstance(grad_penalties, list):
grad_penalties = (grad_penalties,)
for real, fake, weight, grad_penalty in zip(reals, fakes, weights, grad_penalties):
loss += weight * (-real.mean() + fake.mean() + grad_penalty)
else:
for real, fake, weight in zip(reals, fakes, weights):
loss += weight * (-torch.log(real + 1e-8).mean() - torch.log(1 - fake + 1e-8).mean())
return loss
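# Quick usage sketch for GANLoss (illustrative only; the tensor shape below is
# an assumption, not a value from any SRN training config):
#
#   criterion = GANLoss('lsgan')
#   dummy_logits = torch.zeros(4, 1)
#   print(criterion(dummy_logits, True).item())  # MSE against all-ones target labels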
if __name__ == '__main__':
a = PerceptualLossLPIPS() | [
"[email protected]"
] | |
ce1b27305c6bd0b8a926dbe2218abbb8f297e24b | 5cde21d3eb1667152d4aa7fe489f15339db89551 | /maple/community/process/__init__.py | 1e2c945b04a925467c68181e7a269746fc3c10b2 | [] | no_license | SunmoonSan/PythonDaily | cce0b82c9bfe8e57dc26d8bcb722e165302cf4b0 | c0a95c2ece1b3cb6ef00d1b096fef14892de1ce6 | refs/heads/master | 2022-04-02T13:08:41.692131 | 2020-01-19T12:48:38 | 2020-01-19T12:48:38 | 115,420,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 91 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @desc : Created by San on 2019/12/13 17:23
| [
"[email protected]"
] | |
8838064973dcf235bd1744d1dadead87051a80ea | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_widest.py | 8492170c13955906272fe657ccf1e56cec9420c8 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 236 | py |
from xai.brain.wordbase.adjectives._wide import _WIDE
#class header
class _WIDEST(_WIDE, ):
def __init__(self,):
_WIDE.__init__(self)
self.name = "WIDEST"
self.specie = 'adjectives'
self.basic = "wide"
self.jsondata = {}
| [
"[email protected]"
] | |
0ee0fecb3d4df02cb0dc4970912acd258cfee73d | 549f00e84cf77f08b2d72b3a298174143e23222e | /pytest_echo.py | 025e4f09603e8814d066cc041fb8926ac1650558 | [
"MIT"
] | permissive | hugovk/pytest-echo | 9a94c6a246ae1803dd6b391f56c35a7d0472f209 | 939793448e7d7e80a356aafc4dbb58bbedbe7e2c | refs/heads/master | 2020-08-10T05:26:13.397571 | 2018-04-22T17:12:07 | 2018-04-22T17:12:07 | 214,268,306 | 0 | 0 | MIT | 2019-10-10T19:27:28 | 2019-10-10T19:27:28 | null | UTF-8 | Python | false | false | 5,239 | py | # -*- coding: utf-8 -*-
from __future__ import print_function
import fnmatch
import os
from pprint import pformat
import pkg_resources
from pkg_resources import DistributionNotFound
__version__ = '1.6.0'
def get_installed_distributions():
"""
Return a list of installed Distribution objects.
"""
return [d for d in pkg_resources.working_set]
def get_attr(obj, attr, default='NOT FOUND'):
"""Recursive get object's attribute. May use dot notation.
>>> class C(object):
... pass
>>> a = C()
>>> a.b = C()
>>> a.b.c = 4
>>> get_attr(a, 'b.c')
4
>>> get_attr(a, 'b.c.y', None)
>>> get_attr(a, 'b.c.y', 1)
1
>>> get_attr([0,1,2], '2')
2
>>> get_attr([0,1,(21, 22)], '2.1')
22
>>> get_attr({'key': 11}, 'key')
11
>>> get_attr({'key': {'key': 11}}, 'key.key')
11
"""
if '.' not in attr:
try:
if hasattr(obj, attr):
return getattr(obj, attr, default)
elif isinstance(obj, (list, tuple, set)):
return obj[int(attr)]
elif isinstance(obj, dict):
return obj[attr]
else:
return default
except Exception as e: # pragma: no cover
return str(e)
else:
L = attr.split('.')
return get_attr(get_attr(obj, L[0], default), '.'.join(L[1:]), default)
def get_module_attribute(path):
"""
    Returns an attribute value based on its full path.
    The `attribute` can be either a module attribute (ie. os.path.curdir)
    or an object attribute (ie. linecache.cache.__class__)
    Warning: Be careful when using this function, as it loads any module in the
    path and this will execute any module-level code
:param path: full path to the attribute
:return:
>>> print(get_module_attribute('linecache.cache.__class__'))
<... 'dict'>
>>> print(get_module_attribute('os.path.curdir'))
'.'
>>> print(get_module_attribute('wrong'))
('Unable to load %s', 'wrong')
"""
parts = path.split('.')
parent = ""
pkg = None
try:
for i, part in enumerate(parts):
try:
if parent:
module_name = "%s.%s" % (parent, parts[i])
else:
module_name = parts[i]
pkg = __import__(module_name, fromlist=[parent])
parent = module_name
except ImportError:
if hasattr(pkg, part):
return pformat(get_attr(pkg, ".".join(parts[i:])))
raise Exception('Unable to load %s', path)
except Exception as e:
return str(e)
def get_env(var_name):
if '*' in var_name:
targets = [(key, value)
for key, value in os.environ.items()
if fnmatch.fnmatch(key, var_name)]
else:
targets = [(var_name, os.environ.get(var_name, "<not set>"))]
return targets
def get_version(package_name):
if '*' in package_name:
targets = [(i.key, i.version)
for i in get_installed_distributions()
if fnmatch.fnmatch(i.key, package_name)]
else:
targets = [(package_name, _get_version(package_name))]
return targets
def _get_version(package_name):
try:
import pkg_resources
return pkg_resources.require(package_name)[0].version
except (ImportError, AttributeError, TypeError, DistributionNotFound):
pass
try:
pkg = __import__(package_name)
except ImportError:
return '<unable to load package>'
for attr_name in ('get_version', '__version__', 'VERSION', 'version'):
if hasattr(pkg, attr_name):
attr = getattr(pkg, attr_name)
if callable(attr):
return attr()
else:
return attr
def pytest_report_header(config):
ret = []
if config.option.echo_envs:
ret.append("Environment:")
data = []
for k in config.option.echo_envs:
data.extend(get_env(k))
ret.append("\n".join([" %s: %s" % (k, v)
for k, v in sorted(data)]))
if config.option.echo_versions:
ret.append("Package version:")
data = []
for k in config.option.echo_versions:
data.extend(get_version(k))
ret.append("\n".join([" %s: %s" % (k, v)
for k, v in sorted(data)]))
    if config.option.echo_attributes:
ret.append("Inspections:")
ret.append("\n".join([" %s: %s" % (k, get_module_attribute(k))
                              for k in config.option.echo_attributes]))
if ret:
return "\n".join(ret)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--echo-env', action='append', dest="echo_envs",
default=[], help="environment to print")
group.addoption('--echo-version', action='append', dest="echo_versions",
default=[], help="package version to print")
group.addoption('--echo-attr', action='append', dest="echo_attribues",
default=[], help="attribute to print (full path)")
| [
"[email protected]"
] | |
9c68f21e289ac893f938e83bb2be5f054a2a7561 | 2f6c3e78de825b14cc6d471ba231724d819b7436 | /tasks/ogle.py | 2c1ff53316fd09c5f8108a519f3f81fdfee981f0 | [
"MIT"
] | permissive | astrocatalogs/supernovae | 3f685d447b56c741081acffc6de0c9818149bb47 | 9585d2ae053f15fa91ab5900b5ae962c6a508037 | refs/heads/master | 2023-03-12T12:19:01.300505 | 2023-03-10T16:45:53 | 2023-03-10T16:45:53 | 62,802,442 | 42 | 18 | MIT | 2023-03-14T20:39:37 | 2016-07-07T11:42:13 | Python | UTF-8 | Python | false | false | 6,846 | py | """Import tasks for OGLE.
"""
import os
import re
from astrocats.catalog.utils import is_number, jd_to_mjd, pbar, uniq_cdl
from bs4 import BeautifulSoup, NavigableString, Tag
from decimal import Decimal
from ..supernova import SUPERNOVA
def do_ogle(catalog):
task_str = catalog.get_current_task_str()
basenames = [
'transients', 'transients/2015', 'transients/2014b', 'transients/2014',
'transients/2013', 'transients/2012'
]
oglenames = []
ogleupdate = [True, False, False, False, False]
for b, bn in enumerate(pbar(basenames, task_str)):
if catalog.args.update and not ogleupdate[b]:
continue
filepath = os.path.join(catalog.get_current_task_repo(), 'OGLE-')
filepath += bn.replace('/', '-') + '-transients.html'
htmltxt = catalog.load_url(
'http://ogle.astrouw.edu.pl/ogle4/' + bn + '/transients.html',
filepath)
if not htmltxt:
continue
soup = BeautifulSoup(htmltxt, 'html5lib')
links = soup.findAll('a')
breaks = soup.findAll('br')
datalinks = []
datafnames = []
for a in links:
if a.has_attr('href'):
if '.dat' in a['href']:
datalinks.append('http://ogle.astrouw.edu.pl/ogle4/' + bn +
'/' + a['href'])
datafnames.append(
bn.replace('/', '-') + '-' + a['href'].replace('/',
'-'))
ec = -1
reference = 'OGLE-IV Transient Detection System'
refurl = 'http://ogle.astrouw.edu.pl/ogle4/transients/transients.html'
for bi, br in enumerate(pbar(breaks, task_str)):
sibling = br.nextSibling
if 'Ra,Dec=' in sibling:
line = sibling.replace('\n', '').split('Ra,Dec=')
name = line[0].strip()
ec += 1
if 'NOVA' in name or 'dupl' in name:
continue
if name in oglenames:
continue
oglenames.append(name)
name = catalog.add_entry(name)
mySibling = sibling.nextSibling
atelref = ''
claimedtype = ''
while 'Ra,Dec=' not in mySibling:
if isinstance(mySibling, NavigableString):
if not claimedtype and 'class=' in str(mySibling):
claimedtype = re.sub(r'\([^)]*\)', '',
str(mySibling).split('=')[-1])
claimedtype = claimedtype.replace('SN', '').strip()
if claimedtype == '-':
claimedtype = ''
if isinstance(mySibling, Tag):
atela = mySibling
if (atela and atela.has_attr('href') and
'astronomerstelegram' in atela['href']):
atelref = atela.contents[0].strip()
atelurl = atela['href']
mySibling = mySibling.nextSibling
if mySibling is None:
break
# nextSibling = sibling.nextSibling
# if ((isinstance(nextSibling, Tag) and
# nextSibling.has_attr('alt') and
# nextSibling.contents[0].strip() != 'NED')):
# radec = nextSibling.contents[0].strip().split()
# else:
# radec = line[-1].split()
# ra = radec[0]
# dec = radec[1]
fname = os.path.join(catalog.get_current_task_repo(),
'OGLE/') + datafnames[ec]
csvtxt = catalog.load_url(datalinks[ec], fname)
lcdat = csvtxt.splitlines()
sources = [
catalog.entries[name].add_source(
name=reference, url=refurl)
]
catalog.entries[name].add_quantity(SUPERNOVA.ALIAS, name,
sources[0])
if atelref and atelref != 'ATel#----':
sources.append(catalog.entries[name].add_source(
name=atelref, url=atelurl))
sources = uniq_cdl(sources)
if name.startswith('OGLE'):
if name[4] == '-':
if is_number(name[5:9]):
catalog.entries[name].add_quantity(
SUPERNOVA.DISCOVER_DATE, name[5:9], sources)
else:
if is_number(name[4:6]):
catalog.entries[name].add_quantity(
SUPERNOVA.DISCOVER_DATE, '20' + name[4:6],
sources)
# RA and Dec from OGLE pages currently not reliable
# catalog.entries[name].add_quantity(SUPERNOVA.RA, ra, sources)
# catalog.entries[name].add_quantity(SUPERNOVA.DEC, dec,
# sources)
if claimedtype and claimedtype != '-':
catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
claimedtype, sources)
elif ('SN' not in name and
SUPERNOVA.CLAIMED_TYPE not in catalog.entries[name]):
catalog.entries[name].add_quantity(SUPERNOVA.CLAIMED_TYPE,
'Candidate', sources)
for row in lcdat:
row = row.split()
mjd = str(jd_to_mjd(Decimal(row[0])))
magnitude = row[1]
if float(magnitude) > 90.0:
continue
e_mag = row[2]
upperlimit = False
if e_mag == '-1' or float(e_mag) > 10.0:
e_mag = ''
upperlimit = True
catalog.entries[name].add_photometry(
time=mjd,
u_time='MJD',
band='I',
magnitude=magnitude,
e_magnitude=e_mag,
system='Vega',
source=sources,
upperlimit=upperlimit)
if catalog.args.update:
catalog.journal_entries()
if catalog.args.travis and bi >= catalog.TRAVIS_QUERY_LIMIT:
break
catalog.journal_entries()
return
| [
"[email protected]"
] | |
ed5aaf4d9c069dfae5c52ce541ca6227e507404e | 358aaf68f3c60ebbbd86b3bc66d4e6c098bcb39e | /fonts/tsvga_et4000_8x16.py | ec50d5db6c710d173aebde5e57c209dc19065ccd | [
"MIT"
] | permissive | ccccmagicboy/st7735_mpy | d2de0046abd81978d5176dace45a40758377af82 | b15f1bde69fbe6e0eb4931c57e71c136d8e7f024 | refs/heads/master | 2022-08-28T23:18:04.353733 | 2020-05-28T04:19:21 | 2020-05-28T04:19:21 | 254,869,035 | 7 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,759 | py | """converted from ..\fonts\TSVGA_ET4000_8x16.bin """
WIDTH = 8
HEIGHT = 16
FIRST = 0x20
LAST = 0x7f
_FONT =\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x18\x3c\x3c\x3c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x66\x66\x66\x24\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x6c\x6c\xfe\x6c\x6c\x6c\xfe\x6c\x6c\x00\x00\x00\x00'\
b'\x18\x18\x7c\xc6\xc2\xc0\x7c\x06\x06\x86\xc6\x7c\x18\x18\x00\x00'\
b'\x00\x00\x00\x00\xc2\xc6\x0c\x18\x30\x60\xc6\x86\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x6c\x38\x76\xdc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x30\x30\x30\x60\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x0c\x18\x30\x30\x30\x30\x30\x30\x18\x0c\x00\x00\x00\x00'\
b'\x00\x00\x30\x18\x0c\x0c\x0c\x0c\x0c\x0c\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x3c\xff\x3c\x66\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x18\x18\x7e\x18\x18\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x18\x30\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x02\x06\x0c\x18\x30\x60\xc0\x80\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xce\xde\xf6\xe6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x18\x38\x78\x18\x18\x18\x18\x18\x18\x7e\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x0c\x18\x30\x60\xc0\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\x06\x06\x3c\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x0c\x1c\x3c\x6c\xcc\xfe\x0c\x0c\x0c\x1e\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc0\xc0\xc0\xfc\x06\x06\x06\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x60\xc0\xc0\xfc\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x06\x06\x0c\x18\x30\x30\x30\x30\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7c\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\x7e\x06\x06\x06\x0c\x78\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x18\x18\x00\x00\x00\x18\x18\x30\x00\x00\x00\x00'\
b'\x00\x00\x00\x06\x0c\x18\x30\x60\x30\x18\x0c\x06\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7e\x00\x00\x7e\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x60\x30\x18\x0c\x06\x0c\x18\x30\x60\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x0c\x18\x18\x18\x00\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xde\xde\xde\xdc\xc0\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x38\x6c\xc6\xc6\xfe\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x66\x66\x66\x66\xfc\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xc0\xc0\xc2\x66\x3c\x00\x00\x00\x00'\
b'\x00\x00\xf8\x6c\x66\x66\x66\x66\x66\x66\x6c\xf8\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xfe\x66\x62\x68\x78\x68\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x3c\x66\xc2\xc0\xc0\xde\xc6\xc6\x66\x3a\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xfe\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x3c\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x1e\x0c\x0c\x0c\x0c\x0c\xcc\xcc\xcc\x78\x00\x00\x00\x00'\
b'\x00\x00\xe6\x66\x66\x6c\x78\x78\x6c\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\xf0\x60\x60\x60\x60\x60\x60\x62\x66\xfe\x00\x00\x00\x00'\
b'\x00\x00\xc6\xee\xfe\xfe\xd6\xc6\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\xc6\xe6\xf6\xfe\xde\xce\xc6\xc6\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\xc6\xd6\xde\x7c\x0c\x0e\x00\x00'\
b'\x00\x00\xfc\x66\x66\x66\x7c\x6c\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x7c\xc6\xc6\x60\x38\x0c\x06\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x7e\x7e\x5a\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\xc6\x6c\x38\x10\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\xc6\xc6\xd6\xd6\xd6\xfe\xee\x6c\x00\x00\x00\x00'\
b'\x00\x00\xc6\xc6\x6c\x7c\x38\x38\x7c\x6c\xc6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x66\x66\x66\x66\x3c\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\xfe\xc6\x86\x0c\x18\x30\x60\xc2\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x3c\x30\x30\x30\x30\x30\x30\x30\x30\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x80\xc0\xe0\x70\x38\x1c\x0e\x06\x02\x00\x00\x00\x00'\
b'\x00\x00\x3c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x0c\x3c\x00\x00\x00\x00'\
b'\x10\x38\x6c\xc6\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\x00\x00'\
b'\x30\x30\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x78\x0c\x7c\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\xe0\x60\x60\x78\x6c\x66\x66\x66\x66\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc0\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x1c\x0c\x0c\x3c\x6c\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xfe\xc0\xc0\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x38\x6c\x64\x60\xf0\x60\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\xcc\x78\x00'\
b'\x00\x00\xe0\x60\x60\x6c\x76\x66\x66\x66\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x00\x38\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x06\x06\x00\x0e\x06\x06\x06\x06\x06\x06\x66\x66\x3c\x00'\
b'\x00\x00\xe0\x60\x60\x66\x6c\x78\x78\x6c\x66\xe6\x00\x00\x00\x00'\
b'\x00\x00\x38\x18\x18\x18\x18\x18\x18\x18\x18\x3c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xec\xfe\xd6\xd6\xd6\xd6\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x66\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\xc6\xc6\xc6\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xdc\x66\x66\x66\x66\x66\x7c\x60\x60\xf0\x00'\
b'\x00\x00\x00\x00\x00\x76\xcc\xcc\xcc\xcc\xcc\x7c\x0c\x0c\x1e\x00'\
b'\x00\x00\x00\x00\x00\xdc\x76\x66\x60\x60\x60\xf0\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x7c\xc6\x60\x38\x0c\xc6\x7c\x00\x00\x00\x00'\
b'\x00\x00\x10\x30\x30\xfc\x30\x30\x30\x30\x36\x1c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xcc\xcc\xcc\xcc\xcc\xcc\x76\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\x66\x66\x66\x66\x66\x3c\x18\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xd6\xd6\xd6\xfe\x6c\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\x6c\x38\x38\x38\x6c\xc6\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x00\xc6\xc6\xc6\xc6\xc6\xc6\x7e\x06\x0c\xf8\x00'\
b'\x00\x00\x00\x00\x00\xfe\xcc\x18\x30\x60\xc6\xfe\x00\x00\x00\x00'\
b'\x00\x00\x0e\x18\x18\x18\x70\x18\x18\x18\x18\x0e\x00\x00\x00\x00'\
b'\x00\x00\x18\x18\x18\x18\x00\x18\x18\x18\x18\x18\x00\x00\x00\x00'\
b'\x00\x00\x70\x18\x18\x18\x0e\x18\x18\x18\x18\x70\x00\x00\x00\x00'\
b'\x00\x00\x76\xdc\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'\
b'\x00\x00\x00\x00\x10\x38\x6c\xc6\xc6\xc6\xfe\x00\x00\x00\x00\x00'\
FONT = memoryview(_FONT)
| [
"[email protected]"
] | |
f0d990d45a27fde720efb4dff618a7fd5ef391b2 | 8600ea155f279e5a8dfe5a1926038511f6b6a7ea | /sale_crm/wizard/__init__.py | 7c43908361fc6be916f06247bd16776a8e4c1776 | [] | no_license | MarkNorgate/addons-EAD | c2fff89ab16fce3ba19fbe433ee5863705a6f4e5 | 840f28642b5d328e4b86839c413e5164622295a5 | refs/heads/master | 2020-04-23T22:11:00.164438 | 2015-07-22T12:24:53 | 2015-07-22T12:24:53 | 39,501,011 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,090 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>). All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import makesale
import makecase
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
a468ccdf43c987d187ed161205454ce4aef48536 | 7aa6a5e94c9dea6686a55f316b78a91e8e301ad0 | /31-nato-dictionary/codewars_unittest/__init__.py | 67596a1dec6777ad40ea468b610619eb0038ff53 | [] | no_license | marinasupernova/codewars | 3ede0d61693462551112bee1019c34396d91b2d9 | fcea73a6cf564159a7fc776edc47cf57fab121df | refs/heads/main | 2023-04-26T07:17:56.149954 | 2021-05-31T06:01:13 | 2021-05-31T06:01:13 | 345,687,012 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | from .test_runner import CodewarsTestRunner
| [
"[email protected]"
] | |
54e305cc17a0c2232390a83ffbdeb8ed103b393e | 219d7cf7cf00b778ff1a5709406c144fcf2132f3 | /First Steps in Coding - Lab/07. Projects Creation.py | 7ca4becdbaa4bc97c0ff32e779c16f999679df79 | [] | no_license | SilviaKoynova/Softuni-Programming-Basics-Python | e8e175419383815c65c4e110fdb2b752d940e887 | 0dfef0850f2cb8471dfee1af89f137be4e887cb8 | refs/heads/main | 2023-07-13T00:35:09.389302 | 2021-08-27T07:43:45 | 2021-08-27T07:43:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 163 | py | name = input()
projects = int(input())
need_hours = projects * 3
print(f"The architect {name} will need {need_hours} hours to complete {projects} project/s.")
| [
"[email protected]"
] | |
736887a4862a68ddb38a06f891def851858936db | 9d8acc20d2ee1d1957849dfb71c22e0dae2d8c5c | /baomoicrawl/venv/Lib/site-packages/twisted/test/test_ftp_options.py | ef567bbb49bfce24ee4cb271b1a59b1a8730dd7a | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | thuy4tbn99/TranTruongThuy_17021178_Nhom4_Crawler | b0fdedee2942a12d9f64dfed93f43802dc5ab340 | 87c8c07433466bbc43a24ea089f75baeb467c356 | refs/heads/master | 2022-11-27T21:36:33.917491 | 2020-08-10T23:24:42 | 2020-08-10T23:24:42 | 286,583,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,765 | py | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.tap.ftp}.
"""
from twisted.trial.unittest import TestCase
from twisted.cred import credentials, error
from twisted.tap.ftp import Options
from twisted.python import versions
from twisted.python.filepath import FilePath
class FTPOptionsTests(TestCase):
"""
Tests for the command line option parser used for C{twistd ftp}.
"""
usernamePassword = (b'iamuser', b'thisispassword')
def setUp(self):
"""
Create a file with two users.
"""
self.filename = self.mktemp()
f = FilePath(self.filename)
f.setContent(b':'.join(self.usernamePassword))
self.options = Options()
def test_passwordfileDeprecation(self):
"""
The C{--password-file} option will emit a warning stating that
said option is deprecated.
"""
self.callDeprecated(
versions.Version("Twisted", 11, 1, 0),
self.options.opt_password_file, self.filename)
def test_authAdded(self):
"""
The C{--auth} command-line option will add a checker to the list of
checkers
"""
numCheckers = len(self.options['credCheckers'])
self.options.parseOptions(['--auth', 'file:' + self.filename])
self.assertEqual(len(self.options['credCheckers']), numCheckers + 1)
def test_authFailure(self):
"""
The checker created by the C{--auth} command-line option returns a
L{Deferred} that fails with L{UnauthorizedLogin} when
presented with credentials that are unknown to that checker.
"""
self.options.parseOptions(['--auth', 'file:' + self.filename])
checker = self.options['credCheckers'][-1]
invalid = credentials.UsernamePassword(self.usernamePassword[0], 'fake')
return (checker.requestAvatarId(invalid)
.addCallbacks(
lambda ignore: self.fail("Wrong password should raise error"),
lambda err: err.trap(error.UnauthorizedLogin)))
def test_authSuccess(self):
"""
The checker created by the C{--auth} command-line option returns a
L{Deferred} that returns the avatar id when presented with credentials
that are known to that checker.
"""
self.options.parseOptions(['--auth', 'file:' + self.filename])
checker = self.options['credCheckers'][-1]
correct = credentials.UsernamePassword(*self.usernamePassword)
return checker.requestAvatarId(correct).addCallback(
lambda username: self.assertEqual(username, correct.username)
)
| [
"[email protected]"
] | |
9562bc0b7e2dcc38f7a84b31462b6d5fd5598619 | 3c898b1aec7009110c63504d5a56e31914625d1b | /acrylamid/filters/rstx_youtube.py | 1866872d0d44360221e12addf431c60a545739b3 | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | MagicSword/acrylamid | e294b151ed6305f37fc5a5fdd4f1f0fb999a22f7 | 6f34bc5fb2175af1103aec7a910ef48a6842de03 | refs/heads/master | 2021-01-16T21:30:58.564719 | 2012-06-22T16:00:50 | 2012-06-22T16:45:38 | 4,817,948 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,988 | py | # -*- encoding: utf-8 -*-
#
# Copyright 2012 posativ <[email protected]>. All rights reserved.
# License: BSD Style, 2 clauses. see acrylamid/__init__.py
from docutils import nodes
from docutils.parsers.rst import Directive, directives
match = ['youtube', 'yt']
def align(argument):
return directives.choice(argument, ('left', 'center', 'right'))
class YouTube(Directive):
"""reStructuredText directive that creates an embed object to display
a video from Youtube (:options: are optional).
Usage example::
.. youtube:: ZPJlyRv_IGI
:start: 34
:align: center
:height: 1280
:width: 720
:ssl:
"""
required_arguments = 1
optional_arguments = 0
option_spec = {
'height': directives.length_or_unitless,
'width': directives.length_or_percentage_or_unitless,
'border': directives.length_or_unitless,
'align': align,
'start': int,
'ssl': directives.flag,
}
has_content = False
def run(self):
alignments = {
'left': '0',
'center': '0 auto',
'right': '0 0 0 auto',
}
uri = ('https://' if 'ssl' in self.options else 'http://') \
+ 'www.youtube-nocookie.com/embed/' + self.arguments[0]
self.options['uri'] = uri
self.options['align'] = alignments[self.options.get('align', 'center')]
self.options.setdefault('width', '680px')
self.options.setdefault('height', '382px')
self.options.setdefault('border', 0)
self.options.setdefault('start', 0)
YT_EMBED = """<iframe width="%(width)s" height="%(height)s" src="%(uri)s" \
frameborder="%(border)s" style="display: block; margin: %(align)s;" \
start="%(start)i" class="video" allowfullscreen></iframe>"""
return [nodes.raw('', YT_EMBED % self.options, format='html')]
def makeExtension():
return YouTube
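# Usage sketch (assumption: when used outside acrylamid's filter machinery,
# the directive would be registered with docutils by hand):
#   from docutils.parsers.rst import directives
#   directives.register_directive('youtube', YouTube)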
| [
"[email protected]"
] | |
b9cea96bd1fe04ff6d961295ea869a78c3e571e4 | dfab6798ece135946aebb08f93f162c37dd51791 | /core/luban/db/models.py | e30723a4165928648d1c38b81f47e476985bd1ca | [] | no_license | yxqd/luban | 405f5f7dcf09015d214079fe7e23d644332be069 | 00f699d15c572c8bf160516d582fa37f84ac2023 | refs/heads/master | 2020-03-20T23:08:45.153471 | 2012-05-18T14:52:43 | 2012-05-18T14:52:43 | 137,831,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,608 | py | # -*- Python -*-
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2006-2011 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# example base class of model
# from sqlalchemy.ext.declarative import declarative_base
# Base = declarative_base()
# XXX: thinking of use metaclass...
class ModelCollector:
def __new__(cls, name, bases, attributes, **kwds):
# the created class
created = super().__new__(cls, name, bases, attributes, **kwds)
model_registry.register(created)
return created
class ModelRegistry:
def __init__(self):
self.models = {}
return
def register(self, cls):
self.models[cls.__name__] = cls
return
def __getattr__(self, name):
return self.models[name]
model_registry = ModelRegistry()
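# Minimal usage sketch (assumption: ModelCollector is meant to be mixed with
# ``type`` into a metaclass, per the XXX note above; the names are hypothetical):
#
#   class ModelMeta(ModelCollector, type):
#       pass
#
#   class User(metaclass=ModelMeta):
#       pass
#
#   assert model_registry.User is User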
# method to load all db models in a python sub-package
def loadModels(subpkg):
    # the implementation just imports all sub-modules in the sub-pkg
# recursively
path = subpkg.__path__
import os
import pkgutil
prefix = subpkg.__name__ + '.'
for loader, module_name, is_pkg in pkgutil.walk_packages(path, prefix):
found = loader.find_module(module_name)
if not found:
print ("%s not found" % module_name)
else:
mod = found.load_module(module_name)
continue
return
# End of file
| [
"[email protected]"
] | |
3e488c1e6d8440ad53c140620d92ef2e370ce8d9 | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/crnn_seq2seq_ocr/export.py | 5e3a5b228456dd05aac02549e97f25cb124b1625 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 2,428 | py | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""
export.
"""
import os
import numpy as np
from mindspore import context, Tensor
from mindspore.train.serialization import load_checkpoint, load_param_into_net, export
from src.attention_ocr import AttentionOCRInfer
from src.model_utils.config import config
from src.model_utils.device_adapter import get_device_id
def get_model():
'''generate model'''
context.set_context(mode=context.GRAPH_MODE, device_target=config.device_target, device_id=get_device_id())
# Network
network = AttentionOCRInfer(config.eval_batch_size,
int(config.img_width / 4),
config.encoder_hidden_size,
config.decoder_hidden_size,
config.decoder_output_size,
config.max_length,
config.dropout_p)
checkpoint_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), config.checkpoint_path)
ckpt = load_checkpoint(checkpoint_path)
load_param_into_net(network, ckpt)
network.set_train(False)
print("Checkpoint loading Done!")
sos_id = config.characters_dictionary.go_id
images = Tensor(np.zeros((config.eval_batch_size, 3, config.img_height, config.img_width),
dtype=np.float32))
decoder_hidden = Tensor(np.zeros((1, config.eval_batch_size, config.decoder_hidden_size),
dtype=np.float16))
decoder_input = Tensor((np.ones((config.eval_batch_size, 1)) * sos_id).astype(np.int32))
inputs = (images, decoder_input, decoder_hidden)
export(network, *inputs, file_name=config.file_name, file_format=config.file_format)
if __name__ == '__main__':
get_model()
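# Example export invocation (hypothetical flags; the real names come from the
# YAML consumed by src.model_utils.config):
#   python export.py --file_name crnn_seq2seq_ocr --file_format MINDIR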
| [
"[email protected]"
] | |
38eaa72b941d96798a70a6dda2b4584e8d01c6e4 | 031dbb2a3ea47a0483db310db9f98796cc83c500 | /787_Cheapest Flights Within K Stops.py | 6023db6bc0143b13eabbfbbe62dfaa8852018733 | [] | no_license | Shwan-Yu/Data_Structures_and_Algorithms | 429fb127983e32931f2168f44ef1484c1cc4c87f | 9126c2089e41d4d7fd3a204115eba2b5074076ad | refs/heads/master | 2020-03-27T11:46:59.947303 | 2019-08-23T15:15:21 | 2019-08-23T15:15:21 | 146,507,496 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 576 | py | class Solution(object):
def findCheapestPrice(self, n, flights, src, dst, K):
"""
:type n: int
:type flights: List[List[int]]
:type src: int
:type dst: int
:type K: int
:rtype: int
"""
if not flights: return 0
dp = [float("inf")] * n
dp[src] = 0
for k in range(K+1):
dp_cur = dp[:]
for (a, i, price) in flights:
dp_cur[i] = min(dp_cur[i], dp[a] + price)
dp = dp_cur
return dp[dst] if dp[dst] != float("inf") else -1
| [
"[email protected]"
] | |
17f42d961d07ae670ad5a9895a53fe22b9e5e27b | 3def27e101ca346af6b30247769719f5cd5a27c0 | /indiaos/config/docs.py | 95939bb230467499359e5175e7a089a84d88ecaa | [
"MIT"
] | permissive | anto-christo/indiaos | 9fb94527092570981288b42a05001cf33b61b522 | 4b029cf86e49dcabad852312293e6fa5116d4155 | refs/heads/master | 2020-09-13T19:50:15.187112 | 2019-11-20T08:30:13 | 2019-11-20T08:30:13 | 222,887,034 | 1 | 0 | NOASSERTION | 2019-11-20T08:24:27 | 2019-11-20T08:24:26 | null | UTF-8 | Python | false | false | 313 | py | """
Configuration for docs
"""
# source_link = "https://github.com/[org_name]/indiaos"
# docs_base_url = "https://[org_name].github.io/indiaos"
# headline = "App that does everything"
# sub_heading = "Yes, you got that right the first time, everything"
def get_context(context):
context.brand_html = "IndiaOS"
| [
"[email protected]"
] | |
bfcf8014c00faabd8828474c77a4d96497ba9a38 | da54cb56e69ca730156156ca70a720cfbd7723ea | /others/coffee_plackett/mindsdb_acc.py | 78dfe064c969ce8ad89c969d29d4efe3ffbc3f23 | [
"MIT"
] | permissive | setohe0909/mindsdb-examples | b4f7908aa0c96fc0ea0721931f95bc4960bc2867 | 04fc9b4ad9bb8e960a996e1c4eab1e6054bca8ff | refs/heads/master | 2022-11-27T00:21:16.114913 | 2020-08-06T15:33:17 | 2020-08-06T15:33:17 | 296,442,864 | 1 | 0 | MIT | 2020-09-17T21:08:52 | 2020-09-17T21:08:51 | null | UTF-8 | Python | false | false | 237 | py | from mindsdb_native import Predictor
mdb = Predictor(name='coffee_predictor')
mdb.learn(from_data='data.tsv', to_predict=['Coffe_Malt', 'Chocolat', 'Gold', 'Medium_Barley', 'Dark_Barley', 'Dandelion', 'Beets', 'Chicory_Roots', 'Figs'])
| [
"[email protected]"
] | |
484d7007aa18126e562a439f5ddb39f19a4e0ea8 | 908655251066427f654ee33ebdf804f9f302fcc3 | /Tests/CartPoleAST/CartPoleNdRewardt/MultiCartPoleNd_RLNonInter.py | 7bde5b51f00c93349bfd677128345b1493e7a0c2 | [] | no_license | maxiaoba/MCTSPO | be567f80f1dcf5c35ac857a1e6690e1ac599a59d | eedfccb5a94e089bd925b58f3d65eef505378bbc | refs/heads/main | 2023-07-05T02:20:16.752650 | 2021-07-06T06:04:40 | 2021-07-06T06:04:40 | 381,811,407 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,181 | py | import os
os.environ["CUDA_VISIBLE_DEVICES"]="-1" #just use CPU
# from garage.tf.algos.trpo import TRPO
from garage.baselines.zero_baseline import ZeroBaseline
from mylab.envs.tfenv import TfEnv
from garage.tf.policies.gaussian_mlp_policy import GaussianMLPPolicy
from garage.tf.policies.gaussian_lstm_policy import GaussianLSTMPolicy
from garage.tf.optimizers.conjugate_gradient_optimizer import ConjugateGradientOptimizer, FiniteDifferenceHvp
from garage.misc import logger
from mylab.rewards.ast_reward import ASTReward
from mylab.envs.ast_env import ASTEnv
from mylab.simulators.policy_simulator import PolicySimulator
from CartPoleNd.cartpole_nd import CartPoleNdEnv
from mylab.algos.trpo import TRPO
import os.path as osp
import argparse
# from example_save_trials import *
import tensorflow as tf
import joblib
import math
import numpy as np
import mcts.BoundedPriorityQueues as BPQ
import csv
# Logger Params
parser = argparse.ArgumentParser()
parser.add_argument('--exp_name', type=str, default="cartpole")
parser.add_argument('--n_trial', type=int, default=5)
parser.add_argument('--trial_start', type=int, default=0)
parser.add_argument('--n_itr', type=int, default=2500)
parser.add_argument('--batch_size', type=int, default=4000)
parser.add_argument('--snapshot_mode', type=str, default="gap")
parser.add_argument('--snapshot_gap', type=int, default=500)
parser.add_argument('--log_dir', type=str, default='./Data/AST/RLNonInter')
parser.add_argument('--args_data', type=str, default=None)
args = parser.parse_args()
top_k = 10
max_path_length = 100
interactive = False
tf.set_random_seed(0)
sess = tf.Session()
sess.__enter__()
# Instantiate the env
env_inner = CartPoleNdEnv(nd=10,use_seed=False)
data = joblib.load("../CartPole/Data/Train/itr_50.pkl")
policy_inner = data['policy']
reward_function = ASTReward()
simulator = PolicySimulator(env=env_inner,policy=policy_inner,max_path_length=max_path_length)
env = TfEnv(ASTEnv(interactive=interactive,
simulator=simulator,
sample_init_state=False,
s_0=[0.0, 0.0, 0.0 * math.pi / 180, 0.0],
reward_function=reward_function,
))
# Create policy
policy = GaussianLSTMPolicy(name='lstm_policy',
env_spec=env.spec,
hidden_dim=128,
use_peepholes=True)
with open(osp.join(args.log_dir, 'total_result.csv'), mode='w') as csv_file:
fieldnames = ['step_count']
for i in range(top_k):
fieldnames.append('reward '+str(i))
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
for trial in range(args.trial_start,args.trial_start+args.n_trial):
# Create the logger
log_dir = args.log_dir+'/'+str(trial)
tabular_log_file = osp.join(log_dir, 'process.csv')
text_log_file = osp.join(log_dir, 'text.txt')
params_log_file = osp.join(log_dir, 'args.txt')
logger.set_snapshot_dir(log_dir)
logger.set_snapshot_mode(args.snapshot_mode)
logger.set_snapshot_gap(args.snapshot_gap)
logger.log_parameters_lite(params_log_file, args)
if trial > args.trial_start:
old_log_dir = args.log_dir+'/'+str(trial-1)
logger.pop_prefix()
logger.remove_text_output(osp.join(old_log_dir, 'text.txt'))
logger.remove_tabular_output(osp.join(old_log_dir, 'process.csv'))
logger.add_text_output(text_log_file)
logger.add_tabular_output(tabular_log_file)
logger.push_prefix("["+args.exp_name+'_trial '+str(trial)+"]")
np.random.seed(trial)
params = policy.get_params()
sess.run(tf.variables_initializer(params))
baseline = ZeroBaseline(env_spec=env.spec)
optimizer = ConjugateGradientOptimizer(hvp_approach=FiniteDifferenceHvp(base_eps=1e-5))
top_paths = BPQ.BoundedPriorityQueue(top_k)
algo = TRPO(
env=env,
policy=policy,
baseline=baseline,
batch_size=args.batch_size,
step_size=0.1,
n_itr=args.n_itr,
store_paths=True,
optimizer= optimizer,
max_path_length=max_path_length,
top_paths = top_paths,
plot=False,
)
algo.train(sess=sess, init_var=False)
row_content = dict()
row_content['step_count'] = args.n_itr*args.batch_size
i = 0
for (r,action_seq) in algo.top_paths:
row_content['reward '+str(i)] = r
i += 1
writer.writerow(row_content) | [
"[email protected]"
] | |
5e58b6483a21d1dcda87883dadabb128dcf9cdbe | 4ed038a638725ac77731b0b97ddd61aa37dd8d89 | /cairis/gui/SecurityPatternDialog.py | 08a80cb3e41d28e481cae171536b5d583ce0b767 | [
"Apache-2.0"
] | permissive | RachelLar/cairis_update | 0b784101c4aff81ff0390328eb615e335301daa2 | 0b1d6d17ce49bc74887d1684e28c53c1b06e2fa2 | refs/heads/master | 2021-01-19T06:25:47.644993 | 2016-07-11T20:48:11 | 2016-07-11T20:48:11 | 63,103,727 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,874 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import wx
from cairis.core.armid import *
from SecurityPatternPanel import SecurityPatternPanel
from cairis.core.SecurityPatternParameters import SecurityPatternParameters
import DialogClassParameters
class SecurityPatternDialog(wx.Dialog):
def __init__(self,parent,parameters):
wx.Dialog.__init__(self,parent,parameters.id(),parameters.label(),style=wx.DEFAULT_DIALOG_STYLE|wx.MAXIMIZE_BOX|wx.THICK_FRAME|wx.RESIZE_BORDER,size=(400,500))
self.thePatternName = ''
self.thePatternContext = ''
self.thePatternProblem = ''
self.thePatternSolution = ''
self.theConcernAssociations = []
self.theRequirements = []
self.thePatternId = -1
self.panel = 0
self.buildControls(parameters)
self.commitVerb = 'Add'
def buildControls(self,parameters):
mainSizer = wx.BoxSizer(wx.VERTICAL)
self.panel = SecurityPatternPanel(self)
self.panel.buildControls(parameters.createFlag())
mainSizer.Add(self.panel,1,wx.EXPAND)
self.SetSizer(mainSizer)
wx.EVT_BUTTON(self,SECURITYPATTERN_BUTTONCOMMIT_ID,self.onCommit)
def load(self,pattern):
self.thePatternId = pattern.id()
self.panel.loadControls(pattern)
self.commitVerb = 'Edit'
def onCommit(self,evt):
commitLabel = self.commitVerb + ' security pattern'
nameCtrl = self.FindWindowById(SECURITYPATTERN_TEXTNAME_ID)
contextCtrl = self.FindWindowById(SECURITYPATTERN_TEXTCONTEXT_ID)
problemCtrl = self.FindWindowById(SECURITYPATTERN_TEXTPROBLEM_ID)
solutionCtrl = self.FindWindowById(SECURITYPATTERN_TEXTSOLUTION_ID)
concernsCtrl = self.FindWindowById(SECURITYPATTERN_LISTPATTERNSTRUCTURE_ID)
reqsCtrl = self.FindWindowById(SECURITYPATTERN_LISTREQUIREMENTS_ID)
self.thePatternName = nameCtrl.GetValue()
self.thePatternContext = contextCtrl.GetValue()
self.thePatternProblem = problemCtrl.GetValue()
self.thePatternSolution = solutionCtrl.GetValue()
self.theConcernAssociations = concernsCtrl.associations()
self.theRequirements = reqsCtrl.requirements()
if len(self.thePatternName) == 0:
dlg = wx.MessageDialog(self,'Pattern name cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
if len(self.thePatternContext) == 0:
dlg = wx.MessageDialog(self,'Context cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
if len(self.thePatternProblem) == 0:
dlg = wx.MessageDialog(self,'Problem cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
elif (len(self.thePatternSolution) == 0):
dlg = wx.MessageDialog(self,'Solution cannot be empty',commitLabel,wx.OK)
dlg.ShowModal()
dlg.Destroy()
return
else:
self.EndModal(SECURITYPATTERN_BUTTONCOMMIT_ID)
def parameters(self):
parameters = SecurityPatternParameters(self.thePatternName,self.thePatternContext,self.thePatternProblem,self.thePatternSolution,self.theRequirements,self.theConcernAssociations)
parameters.setId(self.thePatternId)
return parameters
| [
"[email protected]"
] | |
a683c1f4c81d2952675346854e2f80efb8473601 | 37ba3d21dcb6edd21e48dbb7f12591ac3590ab64 | /python_problems_competitive/ten_kinds_of_people.py | 4ef27f518e9cd247adcaa9041da10f96bc2643ec | [] | no_license | Hygens/hackerearth_hackerrank_solutions | 2feaedec255a85792d305bb8ff35675254a03f2a | 86cc4c9ca4d5246f24db8cda93400f1d7ee00882 | refs/heads/master | 2021-07-06T15:56:28.906533 | 2020-04-28T22:45:56 | 2020-04-28T22:45:56 | 55,160,498 | 1 | 2 | null | 2020-10-01T06:48:09 | 2016-03-31T15:15:36 | Python | UTF-8 | Python | false | false | 331 | py | r,c = map(int,input().split(' '))
l = []
for i in range(r):
l.append(list(input().strip()))
n = int(input().strip())
for _ in range(n):
    r1, c1, r2, c2 = map(int, input().split(' '))
    # chained comparison: both endpoint cells hold the same digit
    if l[r1-1][c1-1] == l[r2-1][c2-1] == '0': print('binary')
    elif l[r1-1][c1-1] == l[r2-1][c2-1] == '1': print('decimal')
else: print('neither') | [
"[email protected]"
] | |
2e6d525f0693ba26ecf20429238d8ba878370522 | bc441bb06b8948288f110af63feda4e798f30225 | /resource_package_tools_sdk/model/container/ingress_rule_pb2.py | e7533ea4ef040c1f29394bd3dd0d9f6cdf9fbc34 | [
"Apache-2.0"
] | permissive | easyopsapis/easyops-api-python | 23204f8846a332c30f5f3ff627bf220940137b6b | adf6e3bad33fa6266b5fa0a449dd4ac42f8447d0 | refs/heads/master | 2020-06-26T23:38:27.308803 | 2020-06-16T07:25:41 | 2020-06-16T07:25:41 | 199,773,131 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | true | 4,319 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: ingress_rule.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from resource_package_tools_sdk.model.container import http_ingress_path_pb2 as resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='ingress_rule.proto',
package='container',
syntax='proto3',
serialized_options=_b('ZCgo.easyops.local/contracts/protorepo-models/easyops/model/container'),
serialized_pb=_b('\n\x12ingress_rule.proto\x12\tcontainer\x1a\x42resource_package_tools_sdk/model/container/http_ingress_path.proto\"y\n\x0bIngressRule\x12\x0c\n\x04host\x18\x01 \x01(\t\x12)\n\x04http\x18\x02 \x01(\x0b\x32\x1b.container.IngressRule.Http\x1a\x31\n\x04Http\x12)\n\x05paths\x18\x01 \x03(\x0b\x32\x1a.container.HTTPIngressPathBEZCgo.easyops.local/contracts/protorepo-models/easyops/model/containerb\x06proto3')
,
dependencies=[resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2.DESCRIPTOR,])
_INGRESSRULE_HTTP = _descriptor.Descriptor(
name='Http',
full_name='container.IngressRule.Http',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='paths', full_name='container.IngressRule.Http.paths', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=173,
serialized_end=222,
)
_INGRESSRULE = _descriptor.Descriptor(
name='IngressRule',
full_name='container.IngressRule',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='host', full_name='container.IngressRule.host', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='http', full_name='container.IngressRule.http', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_INGRESSRULE_HTTP, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=101,
serialized_end=222,
)
_INGRESSRULE_HTTP.fields_by_name['paths'].message_type = resource__package__tools__sdk_dot_model_dot_container_dot_http__ingress__path__pb2._HTTPINGRESSPATH
_INGRESSRULE_HTTP.containing_type = _INGRESSRULE
_INGRESSRULE.fields_by_name['http'].message_type = _INGRESSRULE_HTTP
DESCRIPTOR.message_types_by_name['IngressRule'] = _INGRESSRULE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
IngressRule = _reflection.GeneratedProtocolMessageType('IngressRule', (_message.Message,), {
'Http' : _reflection.GeneratedProtocolMessageType('Http', (_message.Message,), {
'DESCRIPTOR' : _INGRESSRULE_HTTP,
'__module__' : 'ingress_rule_pb2'
# @@protoc_insertion_point(class_scope:container.IngressRule.Http)
})
,
'DESCRIPTOR' : _INGRESSRULE,
'__module__' : 'ingress_rule_pb2'
# @@protoc_insertion_point(class_scope:container.IngressRule)
})
_sym_db.RegisterMessage(IngressRule)
_sym_db.RegisterMessage(IngressRule.Http)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
3fa376f3ef087cee256d7492675fdc21898a7b95 | 92c724afcc40c9e4d86af24b1b493e10fc8a994d | /src/figures/exploratory/exploratory_plots.py | f99cbafd230e2935a17d634a4cf0fd989b289b41 | [
"MIT"
] | permissive | willgdjones/GTEx | 48d7551c765700d0db34bb8f6e01f7f2a55bec6c | c56a5d548978545ab8a98e74236d52343113e9e6 | refs/heads/master | 2021-09-13T13:21:12.928226 | 2018-02-06T16:42:41 | 2018-02-06T16:42:41 | 90,028,785 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,241 | py | import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn
import h5py
GTEx_directory = '/hps/nobackup/research/stegle/users/willj/GTEx'
retrained_mean_features = {}
with h5py.File(GTEx_directory + '/small_data/new_retrained_inceptionet_aggregations.hdf5','r') as f:
expression = f['lung']['256']['expression'].value
for s in ['128','256','512','1024','2048','4096']:
size_retrained_mean_features = f['lung'][s]['mean'].value
retrained_mean_features[s] = size_retrained_mean_features
expression_IDs = f['lung']['256']['expression_IDs'].value
raw_mean_features = {}
with h5py.File(GTEx_directory + '/small_data/new_raw_inceptionet_aggregations.hdf5','r') as f:
for s in ['128','256','512','1024','2048','4096']:
size_raw_mean_features = f['lung'][s]['mean'].value
size_raw_mean_features[size_raw_mean_features < 0] = 0
raw_mean_features[s] = size_raw_mean_features
# Comparing variation for each patch size
# f, a = plt.subplots(1,6, figsize=(35,5))
# f.suptitle("Image feature variation. Lung, patch-size 256",size=30)
# for (i,s) in enumerate(['128','256','512','1024','2048','4096']):
# a[i].hist(np.std(retrained_mean_features[s],axis=0),bins=100)
# a[i].set_title("Patch-size {}".format(s),size=20)
# plt.tight_layout()
# plt.subplots_adjust(top=0.80)
# plt.savefig('figures/exploratory/plots/feature_variation.eps',format='eps', dpi=600)
# Comparing variation when concatenating all features together
# plt.figure()
# concatenated_features = np.vstack([retrained_mean_features['128'], retrained_mean_features['256'], retrained_mean_features['512'], retrained_mean_features['1024'], retrained_mean_features['2048'], retrained_mean_features['4096']])
# plt.hist(np.std(concatenated_features,axis=0),bins=100)
# cutoff = min(np.std(concatenated_features[:,np.argsort(np.std(concatenated_features,axis=0))[-500:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 300],c='red')
# plt.title("Histogram of variance from concatenated features across patch-sizes",size=11)
# plt.xlabel("Variance")
# plt.ylabel("Counts")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/concatenated_feature_variation.eps',format='eps', dpi=600)
# Histogram of expression means.
# Include cutoff for top 500
# plt.figure()
# plt.hist(np.mean(expression,axis=0),bins=100)
# cutoff = min(np.mean(expression[:,np.argsort(np.mean(expression,axis=0))[-1000:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 4500],c='red')
# plt.title("Histogram of mean gene expression")
# plt.xlabel("Mean expression")
# plt.ylabel("Count")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/mean_expression_histogram.eps',format='eps', dpi=600)
#
# # Histogram of expression standard deviation.
# # Include cutoff for top 1000
# plt.figure()
# plt.hist(np.std(expression,axis=0),bins=100)
# cutoff = min(np.std(expression[:,np.argsort(np.std(expression,axis=0))[-1000:]],axis=0))
# plt.plot([cutoff, cutoff], [0, 2500],c='red')
# plt.title("Histogram of gene expression standard deviation")
# plt.xlabel("Expression standard devation")
# plt.ylabel("Count")
# plt.tight_layout()
# plt.savefig('figures/exploratory/plots/std_expression_histogram.eps',format='eps', dpi=600)
| [
"[email protected]"
] | |
7c36ac1c024cf960649d2e0a49ddbbd0087fdc2f | a849caca4cc7b66bb3ca93552da873c1415f435d | /Lab Exercise 1.6.2020/fermi.py | 0fb3f6a5b5b8d5291e9c7c3a08e24662cec98290 | [] | no_license | nmessa/Python | 5215b957dc73ece422a0f4cc65752c387a437d34 | 1a32ca1f59aa5a3f89453b6e42d4336e6e8fb961 | refs/heads/master | 2021-07-11T04:45:08.222102 | 2020-09-17T17:32:07 | 2020-09-17T17:32:07 | 199,273,131 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,182 | py | ## Game of Fermi Version 0.5
## Author:
## Date: 1/6/2020
## The goal of the game is for the player to guess the digits in
## the three positions in the least number of tries. For each guess,
## the player provides three digits for position 1, 2, and 3.
## The program replies with a hint consisting of Fermi, Pico, and Nano.
## If the digit guess for a given position is correct, then the reply is Fermi.
## If the digit guessed for a given position is in a different position, then
## the reply is Pico. If the digit guessed for a given position does not match
## any of the three digits, then the reply is Nano.
from random import *
#Create variables
numbers = [1,2,3,4,5,6,7,8,9]
again = True
while again:
win = False
#Build the secret number of 3 unique numbers from 1 to 9
secret = []
while len(secret) < 3:
temp = choice(numbers)
if temp not in secret:
secret.append(temp)
numGuesses = 0 #keep track of numbers guessed
#Play a round
while not win:
#initialize counter and phrases list
count = 0
phrases = []
#Get number guess from user
temp = input("Enter 3 numbers (1 - 9)seperated by spaces: ").split()
#Build a list that represents the number guessed
#Add code here
#update number of guesses
#Add code here
#Algorithm to test number and generate 3 phrases
#Add code here
#Print the result of algorithm execution
for p in phrases:
print(p, end = ' ')
print()
#Check to see if you won
if phrases.count('Fermi') == 3: #this means you won
print('You won in', numGuesses, 'guesses')
win = True
answer = input("Play again (y/n)? ")
if answer == 'n':
again = False
## Sample Output
## Enter 3 numbers (1 - 9): 6 3 5
## Nano Pico Nano
## Enter 3 numbers (1 - 9): 3 4 2
## Pico Pico Nano
## Enter 3 numbers (1 - 9): 4 3 7
## Fermi Pico Nano
## Enter 3 numbers (1 - 9): 4 8 3
## Fermi Fermi Fermi
## You won in 4 guesses
| [
"[email protected]"
] | |
ef7250bd0abdff76776b5c47208d55fca1b57e6b | e3365bc8fa7da2753c248c2b8a5c5e16aef84d9f | /indices/farm.py | 230a7720738a0d70a94d4b11e691cbfc733a27b7 | [] | no_license | psdh/WhatsintheVector | e8aabacc054a88b4cb25303548980af9a10c12a8 | a24168d068d9c69dc7a0fd13f606c080ae82e2a6 | refs/heads/master | 2021-01-25T10:34:22.651619 | 2015-09-23T11:54:06 | 2015-09-23T11:54:06 | 42,749,205 | 2 | 3 | null | 2015-09-23T11:54:07 | 2015-09-18T22:06:38 | Python | UTF-8 | Python | false | false | 1,923 | py | ii = [('EmerRN.py', 4), ('CookGHP3.py', 2), ('LyelCPG2.py', 1), ('MarrFDI.py', 4), ('GodwWSL2.py', 1), ('SadlMLP.py', 1), ('WilbRLW4.py', 1), ('RennJIT.py', 2), ('AubePRP2.py', 6), ('CookGHP.py', 3), ('MartHSI2.py', 22), ('LeakWTI2.py', 6), ('KembFJ1.py', 4), ('WilkJMC3.py', 5), ('WilbRLW5.py', 2), ('LeakWTI3.py', 16), ('MarrFDI3.py', 21), ('TennAP.py', 1), ('PeckJNG.py', 24), ('KnowJMM.py', 1), ('AubePRP.py', 18), ('ChalTPW2.py', 4), ('AdamWEP.py', 1), ('FitzRNS3.py', 1), ('WilbRLW2.py', 4), ('ClarGE2.py', 6), ('GellWPT2.py', 1), ('WilkJMC2.py', 7), ('CarlTFR.py', 3), ('SeniNSP.py', 47), ('CoopJBT2.py', 1), ('RoscTTI3.py', 2), ('KiddJAE.py', 1), ('AdamHMM.py', 2), ('BailJD1.py', 4), ('RoscTTI2.py', 5), ('CoolWHM.py', 3), ('ClarGE.py', 10), ('LandWPA.py', 1), ('IrviWVD.py', 2), ('LyelCPG.py', 2), ('GilmCRS.py', 3), ('DaltJMA.py', 11), ('CrocDNL.py', 10), ('MedwTAI.py', 1), ('LandWPA2.py', 3), ('WadeJEB.py', 8), ('FerrSDO2.py', 2), ('SoutRD2.py', 1), ('LeakWTI4.py', 13), ('LeakWTI.py', 15), ('MedwTAI2.py', 7), ('BachARE.py', 41), ('SoutRD.py', 1), ('WheeJPT.py', 4), ('MereHHB3.py', 9), ('HowiWRL2.py', 22), ('MereHHB.py', 42), ('WilkJMC.py', 4), ('MartHRW.py', 13), ('MackCNH.py', 6), ('WestJIT.py', 1), ('FitzRNS4.py', 13), ('CoolWHM3.py', 4), ('DequTKM.py', 1), ('BentJRP.py', 4), ('EdgeMHT.py', 3), ('FerrSDO.py', 3), ('RoscTTI.py', 3), ('ThomGLG.py', 1), ('KembFJ2.py', 4), ('LewiMJW.py', 4), ('MackCNH2.py', 6), ('JacoWHI2.py', 4), ('HaliTBC.py', 1), ('WilbRLW3.py', 5), ('AinsWRR2.py', 3), ('MereHHB2.py', 24), ('BrewDTO.py', 1), ('JacoWHI.py', 17), ('ClarGE3.py', 22), ('RogeSIP.py', 3), ('MartHRW2.py', 4), ('DibdTRL.py', 1), ('FitzRNS2.py', 14), ('HogaGMM2.py', 1), ('MartHSI.py', 28), ('EvarJSP.py', 8), ('DwigTHH.py', 1), ('SadlMLP2.py', 1), ('BowrJMM2.py', 6), ('BowrJMM3.py', 1), ('BeckWRE.py', 2), ('TaylIF.py', 1), ('WordWYR.py', 3), ('KeigTSS.py', 7), ('WaylFEP.py', 28), ('ClarGE4.py', 23)] | [
"[email protected]"
] | |
ef31a019c6a45e981d10734a870eb4e44043c0d3 | 1dacbf90eeb384455ab84a8cf63d16e2c9680a90 | /pkgs/bokeh-0.11.1-py27_0/lib/python2.7/site-packages/bokeh/command/subcommands/tests/test_info.py | 42f6e6e775b0cc0b11df05470c21ff00bfa6d4cd | [
"Apache-2.0",
"BSD-3-Clause",
"LicenseRef-scancode-unknown"
] | permissive | wangyum/Anaconda | ac7229b21815dd92b0bd1c8b7ec4e85c013b8994 | 2c9002f16bb5c265e0d14f4a2314c86eeaa35cb6 | refs/heads/master | 2022-10-21T15:14:23.464126 | 2022-10-05T12:10:31 | 2022-10-05T12:10:31 | 76,526,728 | 11 | 10 | Apache-2.0 | 2022-10-05T12:10:32 | 2016-12-15T05:26:12 | Python | UTF-8 | Python | false | false | 1,238 | py | from __future__ import absolute_import
import bokeh.command.subcommands.info as scinfo
from bokeh.command.bootstrap import main
def test_create():
import argparse
from bokeh.command.subcommand import Subcommand
obj = scinfo.Info(parser=argparse.ArgumentParser())
assert isinstance(obj, Subcommand)
def test_name():
assert scinfo.Info.name == "info"
def test_help():
assert scinfo.Info.help == "print information about Bokeh and Bokeh server configuration"
def test_args():
assert scinfo.Info.args == (
('--static', dict(
action='store_true',
help="Print the locations of BokehJS static files",
)),
)
def test_run(capsys):
main(["bokeh", "info"])
out, err = capsys.readouterr()
lines = out.split("\n")
assert len(lines) == 5
assert lines[0].startswith("Python version")
assert lines[1].startswith("IPython version")
assert lines[2].startswith("Bokeh version")
assert lines[3].startswith("BokehJS static")
assert lines[4] == ""
assert err == ""
def test_run_static(capsys):
main(["bokeh", "info", "--static"])
out, err = capsys.readouterr()
assert err == ""
assert out.endswith('/bokeh/server/static\n')
| [
"[email protected]"
] | |
38a80c15f2ce13d2c78e5913a3b1aadf4fc2e70a | c9ddbdb5678ba6e1c5c7e64adf2802ca16df778c | /cases/pa3/benchmarks/sieve-457.py | 99bc8c55e3be4471951f624dbaca0a87b6c3a62a | [] | no_license | Virtlink/ccbench-chocopy | c3f7f6af6349aff6503196f727ef89f210a1eac8 | c7efae43bf32696ee2b2ee781bdfe4f7730dec3f | refs/heads/main | 2023-04-07T15:07:12.464038 | 2022-02-03T15:42:39 | 2022-02-03T15:42:39 | 451,969,776 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,586 | py | # A resizable list of integers
class Vector(object):
items: [int] = None
size: int = 0
def __init__(self:"Vector"):
self.items = [0]
# Returns current capacity
def capacity(self:"Vector") -> int:
return len(self.items)
# Increases capacity of vector by one element
def increase_capacity(self:"Vector") -> int:
self.items = self.items + [0]
return self.capacity()
# Appends one item to end of vector
def append(self:"Vector", item: int) -> object:
if self.size == self.capacity():
self.increase_capacity()
self.items[self.size] = item
self.size = self.size + 1
# Appends many items to end of vector
def append_all(self:"Vector", new_items: [int]) -> object:
item:int = 0
for item in new_items:
self.append(item)
# Removes an item from the middle of vector
def remove_at(self:"Vector", idx: int) -> object:
if idx < 0:
return
while idx < self.size - 1:
self.items[idx] = self.items[idx + 1]
idx = idx + 1
self.size = self.size - 1
# Retrieves an item at a given index
def get(self:"Vector", idx: int) -> int:
return self.items[idx]
# Retrieves the current size of the vector
def length(self:"Vector") -> int:
return self.size
# A faster (but more memory-consuming) implementation of vector
class DoublingVector(Vector):
doubling_limit:int = 1000
# Overriding to do fewer resizes
def increase_capacity(self:"DoublingVector") -> int:
        if (self.capacity() <= self.doubling_limit // 2):
self.items = self.items + self.items
else:
# If doubling limit has been reached, fall back to
# standard capacity increases
self.items = self.items + [0]
return self.capacity()
# Makes a vector in the range [i, j)
def vrange(i:int, j:int) -> Vector:
v:Vector = None
v = DoublingVector()
while i < j:
v.append(i)
i = i + 1
return v
# Sieve of Eratosthenes (not really)
def sieve(v:Vector) -> object:
i:int = 0
j:int = 0
k:int = 0
while i < v.length():
k = v.get(i)
j = i + 1
while j < v.length():
if v.get(j) % k == 0:
v.remove_at(j)
else:
j = j + 1
i = i + 1
# Input parameter
n:int = 50
# Data
v:Vector = None
i:int = 0
# Crunch
v = vrange(2, n)
sieve(v)
# Print
while i < v.length():
print(v.get(i))
i = i + 1
| [
"[email protected]"
] | |
de584c4eb19bb366f817c87559e42f7c262ffe1d | 64a2e19c11929e9077a8c99e8d388de279e512e9 | /testRunner/runner.py | 0278655d701a52dba205cd68ffe58b706039b381 | [] | no_license | wallaceok/appiumn_auto | 60f8a2b152a27c39cabf12529345909979527115 | e543a662897c0eedfafdea64297947aa6de45539 | refs/heads/master | 2020-12-24T06:00:26.338592 | 2016-11-08T10:04:04 | 2016-11-08T10:04:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,471 | py | __author__ = 'Administrator'
# -*- coding: utf-8 -*-
import sys
sys.path.append("..")
import datetime
import xlsxwriter
import time
import unittest
from common import reportPhone
from testRunner.runnerBase import TestInterfaceCase, ga
from testCase.Home import testHome
from testCase.work import testContact
from testCase.web.comment import testComment
from testBLL import email as b_email
from testBLL import server
from testBLL import adbCommon
from testMode import email as memail
from testBLL import report as b_report
from testBLL import appBase
from testBLL import apkBase
from testMode import report as m_report
from common.variable import GetVariable as common
from common import dataToString
import os
PATH = lambda p: os.path.abspath(
os.path.join(os.path.dirname(__file__), p)
)
def get_email():
m_email = memail.GetEmail()
m_email.file = PATH( '../email.ini' )
email = b_email.read_email(m_email)
return email
def get_app_msg(f=r"D:\app\appium_study\img\t.apk"):
return apkBase.apkInfo(f).get_app_msg()
def get_common_report(start_test_time, endtime, starttime):
mreport = m_report.GetReport()
b_get_hp_info = appBase.get_phone_info()
raw = appBase.get_men_total(r"d:\men.log")
app_msg = get_app_msg(PATH( '../img/t.apk'))
mreport.test_sum = common.test_sum
mreport.test_failed = common.test_failed
mreport.test_success = common.test_success
    mreport.test_sum_date = str((endtime - starttime).seconds-6) + " seconds"
mreport.app_name = app_msg[0]
mreport.app_size = app_msg[1]
mreport.phone_name = b_get_hp_info["phone_name"] +" " + b_get_hp_info["phone_model"]
mreport.phone_rel =b_get_hp_info["release"]
mreport.phone_pix = appBase.get_app_pix()
mreport.phone_raw = reportPhone.phone_raw(raw/1024)
print(common.MEN)
    avg_men = appBase.get_avg_raw(common.MEN)  # average memory used per sampling pass
mreport.phone_avg_use_raw = avg_men
mreport.phone_max_use_raw = reportPhone.phone_max_use_raw(common.MEN)
mreport.phone_cpu = appBase.get_cpu_kel()
mreport.phone_avg_use_cpu = reportPhone.phone_avg_use_cpu(common.CPU)
mreport.phone_avg_max_use_cpu = reportPhone.phone_avg_max_use_cpu(common.CPU)
mreport.app_version = app_msg[2]
mreport.test_date = start_test_time
mreport.fps_max = reportPhone.fps_max(common.FPS)
mreport.fps_avg = reportPhone.fps_avg(common.FPS)
b_report.OperateReport().set_report(mreport)
def get_common_web_report(start_test_time, endtime, starttime):
pass
def runnerCaseWeb():
suite = unittest.TestSuite()
starttime = datetime.datetime.now()
suite.addTest(TestInterfaceCase.parametrize(testComment))
unittest.TextTestRunner(verbosity=2).run(suite)
def runnerCaseApp():
start_test_time = dataToString.getStrTime(time.localtime(), "%Y-%m-%d %H:%M %p")
suite = unittest.TestSuite()
starttime = datetime.datetime.now()
suite.addTest(TestInterfaceCase.parametrize(testHome))
# suite.addTest(TestInterfaceCase.parametrize(testContact))
unittest.TextTestRunner(verbosity=2).run(suite)
endtime = datetime.datetime.now()
get_common_report(start_test_time, endtime, starttime)
report()
def report():
workbook = xlsxwriter.Workbook('GetReport.xlsx')
    worksheet = workbook.add_worksheet("Test Summary")
    worksheet2 = workbook.add_worksheet("Test Details")
print(common.RRPORT)
b_OperateReport = b_report.OperateReport(wd=workbook, data=common.RRPORT)
b_OperateReport.init(worksheet)
b_OperateReport.detail(worksheet2)
b_OperateReport.close()
b_email.send_mail(get_email())
if __name__ == '__main__':
if ga.selenium_appium == common.APPIUM and ga.platformName == common.ANDROID :
if adbCommon.attached_devices():
appium_server = server.AppiumServer(ga.appiumJs, ga.Remote,ga.selenium_appium)
appium_server.start_server()
while not appium_server.is_runnnig():
time.sleep(2)
runnerCaseApp()
appium_server.stop_server()
else:
print(u"设备不存在")
if ga.selenium_appium == common.SELENIUM:
appium_server = server.AppiumServer(ga.selenium_jar, ga.sel_remote, ga.selenium_appium)
appium_server.start_server()
while not appium_server.is_runnnig():
time.sleep(2)
runnerCaseWeb()
appium_server.stop_server()
| [
"[email protected]"
] | |
ca4b7e3b02e3f9d8bd800d4002d8a1a7aaa44271 | 0b7add5d8583ba3bb02faf4fd5c356fd578f2fcc | /compileProtobuf/dstPb/RightInputProto_pb2.py | 6c7f725c3da07982fafe4f3b3735e4d2df9ca053 | [] | no_license | cappuccino213/IMCIS2Performance | 281f052f1a5dddb4956b3e7127781d2395c07e04 | 74528e0606f78459f6f3bfcf38d4fdf176a36f90 | refs/heads/master | 2023-03-27T20:44:57.266345 | 2021-03-29T07:56:56 | 2021-03-29T07:56:56 | 352,560,398 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | true | 3,095 | py | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: RightInputProto.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='RightInputProto.proto',
package='',
syntax='proto3',
serialized_options=b'\252\002\037TomTaw.eWordIMCIS.WebAPI.Models',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x15RightInputProto.proto\"K\n\x0fRightInputProto\x12\x0f\n\x07roleUID\x18\x01 \x01(\t\x12\x0f\n\x07userUID\x18\x02 \x01(\t\x12\x16\n\x0eisSuperManager\x18\x03 \x01(\tB\"\xaa\x02\x1fTomTaw.eWordIMCIS.WebAPI.Modelsb\x06proto3'
)
_RIGHTINPUTPROTO = _descriptor.Descriptor(
name='RightInputProto',
full_name='RightInputProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='roleUID', full_name='RightInputProto.roleUID', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='userUID', full_name='RightInputProto.userUID', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='isSuperManager', full_name='RightInputProto.isSuperManager', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=25,
serialized_end=100,
)
DESCRIPTOR.message_types_by_name['RightInputProto'] = _RIGHTINPUTPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
RightInputProto = _reflection.GeneratedProtocolMessageType('RightInputProto', (_message.Message,), {
'DESCRIPTOR' : _RIGHTINPUTPROTO,
'__module__' : 'RightInputProto_pb2'
# @@protoc_insertion_point(class_scope:RightInputProto)
})
_sym_db.RegisterMessage(RightInputProto)
DESCRIPTOR._options = None
# @@protoc_insertion_point(module_scope)
| [
"[email protected]"
] | |
a1ca652bbcbc6fe3ceebec0c3c56a8205ba2449f | 3597ecf8a014dbd6f7d998ab59919a94aff8011d | /front-web/src/www/application/modules/treatment/block/actions.py | 18cc3477c9e71c71e2a949ed2b6fbd5799dbce77 | [] | no_license | duytran92-cse/nas-genomebrowser | f42b8ccbb7c5245bde4e52a0feed393f4b5f6bf1 | d0240ad5edc9cfa8e7f89db52090d7d733d2bb8a | refs/heads/master | 2022-10-24T05:26:01.760241 | 2020-06-14T19:01:35 | 2020-06-14T19:01:35 | 272,264,694 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,214 | py | from django.http import HttpResponse, HttpResponseRedirect, JsonResponse
from django.conf import settings
from notasquare.urad_web import actions, page_contexts, widgets
from notasquare.urad_web_material import renderers
from application.modules.common import page_contexts, actions as common_actions, components as common_components
from application.themes.genopedia import renderers as genopedia_renderers
from application.themes.genopedia import widgets as genopedia_widgets
from application import constants
from . import components
class Update(actions.crud.UpdateAction, common_actions.BaseAction):
def create_form(self):
treatment_block = components.TreatmentBlockStore(self.get_container()).get(self.params['block_id'])
kind = treatment_block['data']['record']['kind']
form = widgets.form.Form()
form.renderer = renderers.widgets.form.HorizontalFormRenderer()
if kind == 'general_text':
form.add_field(widgets.field.Textbox('title'))
form.add_field(widgets.field.Textarea('text'))
form.renderer.add_section('General - Text')
form.renderer.add_field('title', 'Title')
form.renderer.add_field('text', 'Text', rows=15)
if kind == 'general_publications':
form.add_field(widgets.field.List('publications', {
'pmid': widgets.field.Textbox('pmid'),
'doi': widgets.field.Textbox('doi'),
'pmc': widgets.field.Textbox('pmc'),
'title': widgets.field.Textarea('title'),
'authors': widgets.field.Textarea('authors'),
'journal': widgets.field.Textarea('journal')
}))
form.renderer.add_section('General - Publications')
form.renderer.add_field('publications', 'Publications', columns=[
{'id': 'pmid', 'label': 'PMID', 'width': '10%'},
{'id': 'doi', 'label': 'DOI', 'width': '10%'},
{'id': 'pmc', 'label': 'PMC', 'width': '10%'},
{'id': 'title', 'label': 'Title', 'width': '30%'},
{'id': 'authors', 'label': 'Authors', 'width': '15%'},
{'id': 'journal', 'label': 'Journal', 'width': '15%'},
])
if kind == 'general_alias':
            # Show the aliases of the variation
form.add_field(widgets.field.List('alias', {
'id': widgets.field.Textbox('id'),
'alias': widgets.field.Textbox('alias')
}))
form.renderer.add_section('Variation - Alias')
form.renderer.add_field('alias', 'Alias', columns=[
{'id': 'alias', 'label': 'Alias', 'width': '50%'}
])
form.renderer.set_field_renderer('textbox', renderers.widgets.field.TextboxRenderer())
form.renderer.set_field_renderer('textarea', renderers.widgets.field.TextareaRenderer())
form.renderer.set_field_renderer('combobox', renderers.widgets.field.ComboboxRenderer())
form.renderer.set_field_renderer('list', renderers.widgets.field.ListRenderer())
return form
def load_form(self, form):
result = components.TreatmentBlockStore(self.get_container()).get(self.params['block_id'])
if result['status'] == 'ok':
record = result['data']['record']
form.set_things({
'page': 'treatment',
'page_title': record['treatment_title']
})
form.set_form_data(record)
else:
form.add_message('danger', "Can't load form")
def process_form_data(self, data):
# print "POST-Params-Update:", self.params
data['new_version'] = True
res = components.TreatmentBlockStore(self.get_container()).update(data, self.params['block_id'])
rs = components.TreatmentBlockStore(self.get_container()).helper(res['data']['pk'])
self.params['page_title'] = rs['data']['record']['title']
return res
def handle_on_success(self, messages):
return HttpResponseRedirect('/treatment/%s' % (self.params["page_title"]))
| [
"[email protected]"
] | |
2acbb3b79b0a4861189cb1c43f2d7fd5049f0132 | fc2447b91cbee82e74e939092ec1903678f3217a | /PythonPractice/hm_py/hm_oop/oop_single.py | 4b09ef3d411106af86bc146dc8c60d1ee2a315ee | [] | no_license | yglj/learngit | 0eac654e7c49f2ede064b720e6ee621a702193b4 | 74fb4b93d5726c735b64829cafc99878d8082121 | refs/heads/master | 2022-12-24T10:01:56.705046 | 2019-05-27T21:04:08 | 2019-05-27T21:04:08 | 146,157,116 | 0 | 1 | null | 2022-12-12T07:01:25 | 2018-08-26T06:28:20 | HTML | UTF-8 | Python | false | false | 924 | py | # 单例设计模式
# 类只有创建唯一个对象实例
# 应用场景: 打印机,回收站,音乐播放对象
# __new__() object提供的内置静态方法,作用:为对象分配空间,返回对象引用
class MusicPlayer:
__init_flag = False
instance = None
    def __new__(cls, *args):
        if cls.instance is None:  # allocate object space only once -- this is what makes the class a singleton
            print('creating the object: space is allocated automatically')
            cls.instance = super().__new__(cls)
        return cls.instance  # return the shared object reference
    def __init__(self):  # run the initialisation only once, controlled by the class-level flag
        if MusicPlayer.__init_flag:
            return
        print('initialising the object: assigning instance attributes')
MusicPlayer.__init_flag = True
m = MusicPlayer()
print('-' * 30)
m2 = MusicPlayer()
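
# Quick check (added illustration): both names refer to the same instance,
# and the initialisation message above was printed only once for two calls.
print(m is m2)  # True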
| [
"[email protected]"
] | |
1e75dd937e7a1e842472c37a23b9269408e82317 | c0340c511cff5b40b4681c4d3238d807624c0323 | /results/correlations/plot_byLanguage/plotByLanguage_Combined.py | 9578733a8e10cb798dc13acd08a5f49b86cbdb51 | [] | no_license | m-hahn/grammar-optim | 5fa7ade47d2ad91f517c887ee2c65af24059069d | 07a1a80692a504bcafc8120a21c4dc9066b495ee | refs/heads/master | 2022-08-30T06:54:42.749264 | 2022-08-05T12:09:28 | 2022-08-05T12:09:28 | 156,456,167 | 13 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,378 | py | source("./readGrammarsPerLanguage_Combined.py")
D$LanguageNumeric = as.numeric(D$Language_Ordered)
D$FamilyPrint = as.character(D$Family)
D = D %>% mutate(FamilyPrint = ifelse(FamilyPrint == "Malayo-Sumbawan", "Mal.-Sum.", as.character(FamilyPrint)))
D = D %>% mutate(FamilyPrint = ifelse(FamilyPrint == "Sino-Tibetan", "Sin.-Tib.", as.character(FamilyPrint)))
D = D %>% mutate(FamilyPrint = ifelse(FamilyPrint == "Viet-Muong", "Viet-M.", as.character(FamilyPrint)))
DFam = D %>% group_by(FamilyPrint) %>% summarise(Start = min(LanguageNumeric), End = max(LanguageNumeric), Mean = mean(LanguageNumeric))
DFam$yOffset = 0.2*(1:(nrow(DFam)))
D$yOffset=NULL
D = merge(D, DFam %>% select(FamilyPrint, yOffset), by=c("FamilyPrint"))
DLang = unique(D %>% select(Language_Ordered, iso_Ordered, LanguageNumeric, yOffset))
D = D %>% mutate(CoarseDependency = recode(CoarseDependency, lifted_case=1, lifted_cop=2, aux=3, nmod=4, acl=5, lifted_mark=6, obl=7, xcomp=8))
plot_orders_real = ggplot(D %>% filter(Type == "Real Languages"), aes(x = 1, y = LanguageNumeric+yOffset, group=CoarseDependency)) +
geom_point(aes(fill=DirB, colour = DirB, size =1), position = position_dodge(width=2.0)) +
# scale_color_gradient() + #values=c("blue", "green")) +
theme_classic() +
#theme_bw() +
theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
axis.text.y=element_blank(),axis.ticks=element_blank(),
plot.title=element_text(size=11)) +
theme(axis.title=element_blank()) +
theme(legend.position="none") + labs(x=NULL) +
scale_x_continuous(breaks = NULL) +
scale_y_continuous(breaks = NULL)
plot_orders_eff = ggplot(D %>% filter(Type == "Efficiency"), aes(x = 1, y = LanguageNumeric+yOffset, group=CoarseDependency)) +
geom_point(aes(fill=DirB, colour = DirB, size =1), position = position_dodge(width=2.0)) +
# scale_color_gradient() + #values=c("blue", "green")) +
theme_classic() +
theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
axis.text.y=element_blank(),axis.ticks=element_blank(),
plot.title=element_text(size=11)) +
theme(axis.title=element_blank()) +
theme(legend.position="none") + labs(x=NULL) +
scale_x_continuous(breaks = NULL) +
scale_y_continuous(breaks = NULL)
plot_orders_surp = ggplot(D %>% filter(Type == "Predictability"), aes(x = 1, y = LanguageNumeric+yOffset, group=CoarseDependency)) +
geom_point(aes(fill=DirB, colour = DirB, size =1), position = position_dodge(width=2.0)) +
# scale_color_gradient() + #values=c("blue", "green")) +
theme_classic() +
theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
axis.text.y=element_blank(),axis.ticks=element_blank(),
plot.title=element_text(size=11)) +
theme(axis.title=element_blank()) +
theme(legend.position="none") + labs(x=NULL) +
scale_x_continuous(breaks = NULL) +
scale_y_continuous(breaks = NULL)
plot_orders_pars = ggplot(D %>% filter(Type == "Parseability"), aes(x = 1, y = LanguageNumeric+yOffset, group=CoarseDependency)) +
geom_point(aes(fill=DirB, colour = DirB, size =1), position = position_dodge(width=2.0)) +
# scale_color_gradient() + #values=c("blue", "green")) +
theme_classic() +
theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
axis.text.y=element_blank(),axis.ticks=element_blank(),
plot.title=element_text(size=11)) +
theme(axis.title=element_blank()) +
theme(legend.position="none") + labs(x=NULL) +
scale_x_continuous(breaks = NULL) +
scale_y_continuous(breaks = NULL)
plot_langs = ggplot(DLang)
plot_langs = plot_langs + theme_classic()
plot_langs = plot_langs + theme(axis.text.x=element_blank(), #element_text(size=9, angle=0, vjust=0.3),
axis.text.y=element_blank(),
plot.title=element_text(size=11))
plot_langs = plot_langs + geom_text(aes(x=1.2 + 0.07, y=LanguageNumeric+yOffset, label=iso_Ordered), hjust=1, size=3, colour="grey30")
plot_langs = plot_langs + theme(axis.title=element_blank())
plot_langs = plot_langs + xlim(-2.0, 1.35)
plot_langs = plot_langs + geom_segment(data=DFam, aes(x=0, y=Start+yOffset, xend=0.5, yend=Start+yOffset))
plot_langs = plot_langs + geom_segment(data=DFam, aes(x=0, y=End+yOffset, xend=0.5, yend=End+yOffset))
plot_langs = plot_langs + geom_segment(data=DFam, aes(x=0, y=Start+yOffset, xend=0, yend=End+yOffset))
plot_langs = plot_langs + geom_text(data=DFam, aes(x=-0.1, y=Mean+yOffset , label=FamilyPrint), hjust=1, size=3, colour="grey30")
plot_langs = plot_langs + theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank(),
panel.background = element_blank(), axis.line = element_blank(),
plot.margin=unit(c(0,0,0,0), "mm"),
axis.ticks = element_blank()) + labs(x=NULL)
library("gridExtra")
plot_orders_real = plot_orders_real + theme( plot.margin=unit(c(0,0,0,0), "mm"))
plot_orders_eff = plot_orders_eff + theme( plot.margin=unit(c(0,0,0,0), "mm"))
plot_orders_surp = plot_orders_surp + theme( plot.margin=unit(c(0,0,0,0), "mm"))
plot_orders_pars = plot_orders_pars + theme( plot.margin=unit(c(0,0,0,0), "mm"))
plot = grid.arrange(plot_langs, plot_orders_real, plot_orders_eff, plot_orders_surp, plot_orders_pars, nrow=1, widths=c(1, 1.2, 1.2, 1.2, 1.2))
ggsave(plot=plot, "../figures/pred-eff-pred-pars-families.pdf", width=6, height=8)
plot_langs2 = plot_langs + annotate("text", label="", x=1, y=58.5, size=6)
plot_orders_real2 = plot_orders_real + annotate("text", label="Real", x=1, y=58.5, size=6)
plot_orders_real2 = plot_orders_real2 + geom_point(data=data.frame(num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=NA, y=56.7, colour=NA, fill=NA), color="black", fill=NA, size=4.5, shape=21)
plot_orders_real2 = plot_orders_real2 + geom_text(data=data.frame(CoarseDependency=unique(D$CoarseDependency), num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=CoarseDependency, y=56.55, label=as.character(num)))
plot_orders_real2
plot_orders_eff2 = plot_orders_eff + annotate("text", label="Efficiency", x=1, y=58.5, size=5)
plot_orders_eff2 = plot_orders_eff2 + geom_point(data=data.frame(num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=NA, y=56.7, colour=NA, fill=NA), color="black", fill=NA, size=4.5, shape=21)
plot_orders_eff2 = plot_orders_eff2 + geom_text(data=data.frame(CoarseDependency=unique(D$CoarseDependency), num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=CoarseDependency, y=56.55, label=as.character(num)))
plot_orders_eff2
plot_orders_surp2 = plot_orders_surp + annotate("text", label="Predictability", x=1, y=58.5, size=5)
plot_orders_surp2 = plot_orders_surp2 + geom_point(data=data.frame(num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=NA, y=56.7, colour=NA, fill=NA), color="black", fill=NA, size=4.5, shape=21)
plot_orders_surp2 = plot_orders_surp2 + geom_text(data=data.frame(CoarseDependency=unique(D$CoarseDependency), num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=CoarseDependency, y=56.55, label=as.character(num)))
plot_orders_surp2
plot_orders_pars2 = plot_orders_pars + annotate("text", label="Parseability", x=1, y=58.5, size=5)
plot_orders_pars2 = plot_orders_pars2 + geom_point(data=data.frame(num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=NA, y=56.7, colour=NA, fill=NA), color="black", fill=NA, size=4.5, shape=21)
plot_orders_pars2 = plot_orders_pars2 + geom_text(data=data.frame(CoarseDependency=unique(D$CoarseDependency), num=c(1,2,3,4,5,6,7,8)), aes(x=0.25 * num - 0.12, group=CoarseDependency, y=56.55, label=as.character(num)))
plot_orders_pars2
plot = grid.arrange(plot_langs2, plot_orders_real2, plot_orders_eff2, plot_orders_surp2, plot_orders_pars2, nrow=1, widths=c(1, 1.2, 1.2, 1.2, 1.2))
plot
ggsave(plot=plot, "../figures/pred-eff-pred-pars-families-2.pdf", width=6, height=8)
D2 = (D %>% select(Family, Language, CoarseDependency, DirB, Type) %>% spread(Type, DirB) %>% rename(Real = 'Real Languages') %>% rename(Predicted = Efficiency))
D2$Agree = (D2$Real == D2$Predicted)
#summary(glmer(Agree ~ (1|CoarseDependency) + (1|Family), data=D2, family="binomial"))
mean(D2$Agree)
| [
"[email protected]"
] | |
e6cc8b2f9f4f193759e2a16a4b7d84f28a162423 | d87483a2c0b50ed97c1515d49d62c6e9feaddbe0 | /.history/get_positions_20210205021452.py | db0322e005440ad0d993a22856f8587be75cdf25 | [
"MIT"
] | permissive | HopperKremer/hoptrader | 0d36b6e33922414003cf689fb81f924da076a54b | 406793c10bc888648290fd15c7c2af62cf8c6c67 | refs/heads/main | 2023-06-12T15:51:00.910310 | 2021-07-06T16:15:41 | 2021-07-06T16:15:41 | 334,754,936 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,840 | py | # Buy top tickers from Financhill
import requests
from tda import auth, client
from tda.orders.equities import equity_buy_market, equity_buy_limit
from tda.orders.common import Duration, Session
import os, sys
import time
from selenium import webdriver
import json
currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
import config # stored in parent directory for security
token_path = "token"
c = auth.client_from_token_file(token_path, config.api_key)
# positions = c.get_account(config.tda_acct_num, c.Account.Fields.POSITIONS)
# account_info = c.get_account(config.tda_acct_num, fields=[c.Account.Fields.POSITIONS]).json()
# print(account_info)
# positions = c.Account.Fields.POSITIONS
# r = c.get_account(config.tda_acct_num, fields=positions)
# stocks = r.json()['securitiesAccount']['positions']
# # stocks = json.dumps(r.json(), indent=4)
# for stock in stocks:
# print('--------------------------------')
# print(stock['instrument']['symbol'])
# orders = c.Order.Status.FILLED
# r = c.get_orders_by_path(config.tda_acct_num, status = client.Client.Order.Status.WORKING)
# res = c.get_orders_by_path(config.tda_acct_num, status = orders)
# res = s = c.get_account(config.tda_acct_num, fields=c.Account.Fields.POSITIONS)
# data = r.json()
# print(r.json())
orders = client.Client.Account.Fields.ORDERS
r = c.get_account(config.tda_acct_num, fields=orders)
print(json.dumps(r.json(), indent=4))#queued orders would appear here, if not blank list
l = r.json()['securitiesAccount']['orderStrategies']
canceled_orders = [i['orderId'] for i in l if i['status'] == 'CANCELED']
print('canceled', canceled_orders)
for order_id in canceled_orders:
g = c.get_order(order_id, config.tda_acct_num)
print(json.dumps(g.json(), indent=4)) | [
"[email protected]"
] | |
dc816389c06442347a202791e2f3ecfc4e43a317 | 2cd06e44dd79b45708ddf010c31289458d850b94 | /test/functional/feature_maxuploadtarget.py | b5a44cbc6b5cb6b89aca3c4c47d2ce7ef4634a00 | [
"MIT"
] | permissive | adymoloca/flocoin | bc66233e5b3b1af294ca6719b4a26f8829d682e4 | d9244577577dede975c852f6fcfe1afba4d71a57 | refs/heads/master | 2023-08-21T23:51:28.266695 | 2021-10-06T01:40:10 | 2021-10-06T01:40:10 | 408,609,250 | 0 | 0 | MIT | 2021-09-30T10:11:53 | 2021-09-20T21:45:28 | C++ | UTF-8 | Python | false | false | 6,653 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test behavior of -maxuploadtarget.
* Verify that getdata requests for old blocks (>1week) are dropped
if uploadtarget has been reached.
* Verify that getdata requests for recent blocks are respected even
if uploadtarget has been reached.
* Verify that the upload counters are reset after 24 hours.
"""
from collections import defaultdict
import time
from test_framework.messages import CInv, MSG_BLOCK, msg_getdata
from test_framework.p2p import P2PInterface
from test_framework.test_framework import FlocoinTestFramework
from test_framework.util import assert_equal, mine_large_block
class TestP2PConn(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
def on_inv(self, message):
pass
def on_block(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
class MaxUploadTest(FlocoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [[
"-maxuploadtarget=800",
"-acceptnonstdtxn=1",
"-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
]]
self.supports_cli = False
# Cache for utxos, as the listunspent may take a long time later in the test
self.utxo_cache = []
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
# Before we connect anything, we first set the time on the node
# to be in the past, otherwise things break because the CNode
# time counters can't be reset backward after initialization
old_time = int(time.time() - 2*60*60*24*7)
self.nodes[0].setmocktime(old_time)
# Generate some old blocks
self.nodes[0].generate(130)
# p2p_conns[0] will only request old blocks
# p2p_conns[1] will only request new blocks
# p2p_conns[2] will test resetting the counters
p2p_conns = []
for _ in range(3):
p2p_conns.append(self.nodes[0].add_p2p_connection(TestP2PConn()))
# Now mine a big block
mine_large_block(self.nodes[0], self.utxo_cache)
# Store the hash; we'll request this later
big_old_block = self.nodes[0].getbestblockhash()
old_block_size = self.nodes[0].getblock(big_old_block, True)['size']
big_old_block = int(big_old_block, 16)
# Advance to two days ago
self.nodes[0].setmocktime(int(time.time()) - 2*60*60*24)
# Mine one more block, so that the prior block looks old
mine_large_block(self.nodes[0], self.utxo_cache)
# We'll be requesting this new block too
big_new_block = self.nodes[0].getbestblockhash()
big_new_block = int(big_new_block, 16)
# p2p_conns[0] will test what happens if we just keep requesting the
# the same big old block too many times (expect: disconnect)
getdata_request = msg_getdata()
getdata_request.inv.append(CInv(MSG_BLOCK, big_old_block))
max_bytes_per_day = 800*1024*1024
daily_buffer = 144 * 4000000
max_bytes_available = max_bytes_per_day - daily_buffer
success_count = max_bytes_available // old_block_size
# 576MB will be reserved for relaying new blocks, so expect this to
# succeed for ~235 tries.
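        # Rough arithmetic: 800*1024*1024 = 838,860,800 bytes/day, minus the
        # 144*4,000,000 = 576,000,000-byte relay buffer, leaves ~262.9 MB --
        # about 235 serves of the ~1.1 MB large block mined above.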
for i in range(success_count):
p2p_conns[0].send_and_ping(getdata_request)
assert_equal(p2p_conns[0].block_receive_map[big_old_block], i+1)
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
for _ in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
self.log.info("Peer 0 disconnected after downloading old block too many times")
# Requesting the current block on p2p_conns[1] should succeed indefinitely,
# even when over the max upload target.
# We'll try 800 times
getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(800):
p2p_conns[1].send_and_ping(getdata_request)
assert_equal(p2p_conns[1].block_receive_map[big_new_block], i+1)
self.log.info("Peer 1 able to repeatedly download new block")
# But if p2p_conns[1] tries for an old block, it gets disconnected too.
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
p2p_conns[1].send_message(getdata_request)
p2p_conns[1].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 1)
self.log.info("Peer 1 disconnected after trying to download old block")
self.log.info("Advancing system time on node to clear counters...")
# If we advance the time by 24 hours, then the counters should reset,
# and p2p_conns[2] should be able to retrieve the old block.
self.nodes[0].setmocktime(int(time.time()))
p2p_conns[2].sync_with_ping()
p2p_conns[2].send_and_ping(getdata_request)
assert_equal(p2p_conns[2].block_receive_map[big_old_block], 1)
self.log.info("Peer 2 able to download old block")
self.nodes[0].disconnect_p2ps()
self.log.info("Restarting node 0 with download permission and 1MB maxuploadtarget")
self.restart_node(0, ["[email protected]", "-maxuploadtarget=1"])
# Reconnect to self.nodes[0]
peer = self.nodes[0].add_p2p_connection(TestP2PConn())
#retrieve 20 blocks which should be enough to break the 1MB limit
getdata_request.inv = [CInv(MSG_BLOCK, big_new_block)]
for i in range(20):
peer.send_and_ping(getdata_request)
assert_equal(peer.block_receive_map[big_new_block], i+1)
getdata_request.inv = [CInv(MSG_BLOCK, big_old_block)]
peer.send_and_ping(getdata_request)
self.log.info("Peer still connected after trying to download old block (download permission)")
peer_info = self.nodes[0].getpeerinfo()
assert_equal(len(peer_info), 1) # node is still connected
assert_equal(peer_info[0]['permissions'], ['download'])
if __name__ == '__main__':
MaxUploadTest().main()
| [
"[email protected]"
] | |
ea8bcdc0b183def68c8745950edbbf13533c588d | 65b708f0646ea090a4e9bc615cd37fd799bd9bce | /venv/Scripts/pip3-script.py | 307f938a7427296d42bf18912a97aeee71dc9f96 | [] | no_license | chrisna2/python-web-scrapping | af803079586c7b798365d23f5667a24d0c6633e8 | 92e74b4985006246f543de87ff26673b94e8c0a8 | refs/heads/master | 2020-07-08T14:40:32.959560 | 2019-08-23T03:19:47 | 2019-08-23T03:19:47 | 203,703,270 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 416 | py | #!D:\tyn_dev\workspace_pycham\web-scrapping\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')()
)
| [
"[email protected]"
] | |
fb7d5bf4a453cf483c36820ec233a56926f63930 | c7e765a9bed33d3bfb21774e3995bf4a09e04add | /adminmgr/media/code/A2/python/task/BD_1117_1375_1419_1525.py | ea86b27f17cbd91f3957294e879438d4f68c005f | [
"Apache-2.0"
] | permissive | IamMayankThakur/test-bigdata | 13dd2ac7fb76c9baed6c3a0aa943057a22e2d237 | 7f507918c7bec31c92eedcd94491a83486623049 | refs/heads/master | 2022-05-03T00:59:44.127494 | 2022-02-10T19:50:16 | 2022-02-10T19:50:16 | 201,585,028 | 10 | 4 | Apache-2.0 | 2022-04-22T23:39:45 | 2019-08-10T05:34:09 | Python | UTF-8 | Python | false | false | 2,562 | py | from __future__ import print_function
import re
import sys
from operator import add
from pyspark.sql import *
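
# calcRank implements the standard PageRank contribution step: a node that
# currently holds `rank` and has n out-links passes rank/n to each target.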
def calcRank(BatBowl, rank):
n = len(BatBowl)
for i in BatBowl:
yield (i, float(rank)/float(n))
checking = 1
def batbowlKeyValue(x):
lol = x.split(',')
return lol[0],lol[1]
def batbowlRank(x):
lol = x.split(',')
return lol[1],float(lol[2])/float(lol[3])
if __name__ == "__main__" :
if len(sys.argv) != 4:
sys.exit(-1)
spark = SparkSession.builder.appName("Bowlerrank").getOrCreate()
lol = spark.read.text(sys.argv[1]).rdd.map(lambda x : x[0])
lol2 = lol.map(lambda x: batbowlKeyValue(x)).distinct().groupByKey().cache()
lol_temp = lol.map(lambda x: batbowlRank(x)).distinct().groupByKey()
bowr = lol_temp.map(lambda x : (x[0], max(sum(x[1]),1.00)))
itcount = 0
bowr_temp = bowr
noi = int(sys.argv[2])
if (noi <= 0) :
while True:
lol3 = lol2.join(bowr).flatMap(lambda x : calcRank(x[1][0], x[1][1]))
perc = int(sys.argv[3])
if(perc!=0):
bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*(float(perc/100)) + 1-(float(perc/100)))
else:
bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*0.8 + 0.2)
#for wolverine, iron_man in bowr.collect():
# print("%s has rank: %s." % (wolverine, iron_man))
temp = bowr.join(bowr_temp)
temp2 = temp.collect()
flag = 0
for i in temp2:
if(abs(i[1][0]-i[1][1])<0.0001):
flag = flag + 1
else:
break
itcount = itcount + 1
bowr_temp = bowr
if flag==len(temp2):
break
else:
t = int(sys.argv[2])
for _ in range(t):
lol3 = lol2.join(bowr).flatMap(lambda x : calcRank(x[1][0], x[1][1]))
perc = int(sys.argv[3])
if(perc!=0):
bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*(float(perc)/100.00) + 1-(float(perc)/100.00))
else:
bowr = lol3.reduceByKey(add).mapValues(lambda deadpool : deadpool*0.8 + 0.2)
bowr = bowr.sortBy(lambda x : (-x[1],x[0]))
for wolverine, iron_man in bowr.collect():
print("%s,%.12f" % (wolverine, iron_man))
#print("...................................",itcount,"...............................................")
spark.stop()
| [
"[email protected]"
] | |
7a3f9d1a7437cf258fd93efcfdfa3f3a3316d099 | 45ca434bdb9e48fdbb2cda0e7fdd9a76474117b0 | /aliyun-python-sdk-cdn/aliyunsdkcdn/request/v20141111/SetFileCacheExpiredConfigRequest.py | 55bd0c22970cd64217840720fb797559c0c97d7f | [
"Apache-2.0"
] | permissive | wanyanzhenjiang/aliyun-openapi-python-sdk | e41e9937ad3f851e5a58f6bea95663e88f7fee13 | 4a5bf1b35f2395d047ead4444ea46721976bdd24 | refs/heads/master | 2020-12-30T10:37:55.789911 | 2017-07-27T06:55:15 | 2017-07-27T06:55:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,989 | py | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
#
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from aliyunsdkcore.request import RpcRequest
class SetFileCacheExpiredConfigRequest(RpcRequest):
def __init__(self):
RpcRequest.__init__(self, 'Cdn', '2014-11-11', 'SetFileCacheExpiredConfig')
def get_OwnerId(self):
return self.get_query_params().get('OwnerId')
def set_OwnerId(self,OwnerId):
self.add_query_param('OwnerId',OwnerId)
def get_SecurityToken(self):
return self.get_query_params().get('SecurityToken')
def set_SecurityToken(self,SecurityToken):
self.add_query_param('SecurityToken',SecurityToken)
def get_DomainName(self):
return self.get_query_params().get('DomainName')
def set_DomainName(self,DomainName):
self.add_query_param('DomainName',DomainName)
def get_CacheContent(self):
return self.get_query_params().get('CacheContent')
def set_CacheContent(self,CacheContent):
self.add_query_param('CacheContent',CacheContent)
def get_TTL(self):
return self.get_query_params().get('TTL')
def set_TTL(self,TTL):
self.add_query_param('TTL',TTL)
def get_Weight(self):
return self.get_query_params().get('Weight')
def set_Weight(self,Weight):
self.add_query_param('Weight',Weight) | [
"[email protected]"
] | |
dc4e498d5e94244fea4ccc62a2671836d7858c62 | 438f8490be1fa3818daad38254a77bb11ba367b3 | /project/settings.py | 6b500d837b395a5d1f67ad16fa8a0d8088cd6b65 | [] | no_license | n7ey233/maximanat | 2ea0b3c80729dd10e6023b053523ebe7e6ba22d8 | 812d7396fe64af85f86e2dd5e257935bde8719e2 | refs/heads/master | 2020-04-13T10:13:01.382912 | 2018-12-28T06:31:19 | 2018-12-28T06:31:19 | 163,133,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,284 | py | """
Django settings for project project.
Generated by 'django-admin startproject' using Django 2.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '-@xf882li3g_x28_oqt5(=fj8b$*2*9*$hm3(17g^#(klc7pgg'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['127.0.0.1', 'maximanat.pythonanywhere.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'project.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/2.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
| [
"[email protected]"
] | |
5b161e0a9d07b0bddab72ace77e7c27caff8d41a | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/89/usersdata/202/62093/submittedfiles/matriz1.py | f2273f8e67d6acee35c26497ab06b264203a4c29 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 774 | py | # -*- coding: utf-8 -*-
import numpy as np
def cortel1(a):
    # first row that contains a 1
    for i in range(0,a.shape[0],1):
        for j in range(0,a.shape[1],1):
            if a[i,j]==1:
                return i
def cortel2(a):
    # last row that contains a 1 (scan the rows bottom-up)
    for i in range(a.shape[0]-1,-1,-1):
        for j in range(0,a.shape[1],1):
            if a[i,j]==1:
                return i
def cortec1(a):
    # first column that contains a 1
    for j in range(0,a.shape[1],1):
        for i in range(0,a.shape[0],1):
            if a[i,j]==1:
                return j
def cortec2(a):
    # last column that contains a 1 (scan the columns right-to-left);
    # this function was called below but missing from the original
    for j in range(a.shape[1]-1,-1,-1):
        for i in range(0,a.shape[0],1):
            if a[i,j]==1:
                return j
linhas=int(input('rows:'))
colunas=int(input('columns:'))
a=np.zeros((linhas,colunas))
for i in range(0,a.shape[0],1):
    for j in range(0,a.shape[1],1):
        a[i,j]=int(input('value:'))
l1=cortel1(a)
l2=cortel2(a)
c1=cortec1(a)
c2=cortec2(a)
print(a[l1:l2+1,c1:c2+1])
| [
"[email protected]"
] | |
dcb9a544cce84c43cf9b3d7b349db60e8139ccde | 9ce822c07edef943dc519a7ab3916f5a667e114a | /location_and_settings/location_and_settings/doctype/location_list/test_location_list.py | 28da20eda77448d48672ee27fa1509ed7b30bbe6 | [
"MIT"
] | permissive | hrgadeha/location_and_setting | 250fec37c2e63ce7c31b41ac52e50bea1e333392 | 770a7a719ce66bfe699dc805839a972063ff8ab6 | refs/heads/master | 2020-04-25T03:43:09.423485 | 2019-02-28T12:09:31 | 2019-02-28T12:09:31 | 172,486,490 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 217 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2019, Hardik Gadesha and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
class TestLocationList(unittest.TestCase):
pass
| [
"[email protected]"
] | |
943046ca83cc744a00369e1e7ddfec263a7dc795 | ad23b164febd12d5c6d97cfbcd91cf70e2914ab3 | /webtestdata/wsgi.py | 882a4bbee8f33c55053afc3819608ab439306db9 | [] | no_license | wawj901124/webtestdata | 9eedf9a01dec2c157725299bda9a42e8d357ef0b | 54f6412566fce07ece912760c5caea73ede819cb | refs/heads/master | 2022-12-09T14:18:38.125191 | 2021-04-25T07:54:07 | 2021-04-25T07:54:07 | 175,773,318 | 1 | 1 | null | 2022-12-08T02:39:15 | 2019-03-15T07:49:16 | Python | UTF-8 | Python | false | false | 399 | py | """
WSGI config for webtestdata project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/2.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "webtestdata.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
345e69a557ad41d9aae7895d883495769eee2017 | 41b4702e359e3352116eeecf2bdf59cb13c71cf2 | /full_model_walker_param/utils/env_utils.py | 110ef30017c069549d041f0bfb487b464dfec838 | [] | no_license | CaralHsi/Multi-Task-Batch-RL | b0aad53291c1713fd2d89fa4fff4a85c98427d4d | 69d29164ab7d82ec5e06a929ed3b96462db21853 | refs/heads/master | 2022-12-22T19:23:45.341092 | 2020-10-01T00:05:36 | 2020-10-01T00:05:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,899 | py | import os
from gym import Env
from gym.spaces import Box, Discrete, Tuple
import numpy as np
from env.hopper import HopperVelEnv
from env.half_cheetah import HalfCheetahVelEnv
from env.ant_goal import AntGoalEnv
from env.ant_dir import AntDirEnv
from env.humanoid_dir import HumanoidDirEnv
from env.humanoid_dir_openai import HumanoidDirEnvOpenAI
from env.humanoid_goal_ndone import HumanoidGoalEnvNDone
from env.walker_param import Walker2DRandParamsEnv
def get_dim(space):
if isinstance(space, Box):
return space.low.size
elif isinstance(space, Discrete):
return space.n
elif isinstance(space, Tuple):
return sum(get_dim(subspace) for subspace in space.spaces)
elif hasattr(space, 'flat_dim'):
return space.flat_dim
else:
raise TypeError("Unknown space: {}".format(space))
class ProxyEnv(Env):
def __init__(self, wrapped_env):
self._wrapped_env = wrapped_env
self.action_space = self._wrapped_env.action_space
self.observation_space = self._wrapped_env.observation_space
@property
def wrapped_env(self):
return self._wrapped_env
def reset(self, **kwargs):
return self._wrapped_env.reset(**kwargs)
def step(self, action):
return self._wrapped_env.step(action)
def render(self, *args, **kwargs):
return self._wrapped_env.render(*args, **kwargs)
@property
def horizon(self):
return self._wrapped_env.horizon
def terminate(self):
if hasattr(self.wrapped_env, "terminate"):
self.wrapped_env.terminate()
def __getattr__(self, attr):
if attr == '_wrapped_env':
raise AttributeError()
return getattr(self._wrapped_env, attr)
def __getstate__(self):
"""
This is useful to override in case the wrapped env has some funky
__getstate__ that doesn't play well with overriding __getattr__.
The main problematic case is/was gym's EzPickle serialization scheme.
:return:
"""
return self.__dict__
def __setstate__(self, state):
self.__dict__.update(state)
def __str__(self):
return '{}({})'.format(type(self).__name__, self.wrapped_env)
class NormalizedBoxEnv(ProxyEnv):
"""
Normalize action to in [-1, 1].
Optionally normalize observations and scale reward.
"""
def __init__(
self,
env,
reward_scale=1.,
obs_mean=None,
obs_std=None,
):
ProxyEnv.__init__(self, env)
self._should_normalize = not (obs_mean is None and obs_std is None)
if self._should_normalize:
if obs_mean is None:
obs_mean = np.zeros_like(env.observation_space.low)
else:
obs_mean = np.array(obs_mean)
if obs_std is None:
obs_std = np.ones_like(env.observation_space.low)
else:
obs_std = np.array(obs_std)
self._reward_scale = reward_scale
self._obs_mean = obs_mean
self._obs_std = obs_std
ub = np.ones(self._wrapped_env.action_space.shape)
self.action_space = Box(-1 * ub, ub)
def estimate_obs_stats(self, obs_batch, override_values=False):
if self._obs_mean is not None and not override_values:
raise Exception("Observation mean and std already set. To "
"override, set override_values to True.")
self._obs_mean = np.mean(obs_batch, axis=0)
self._obs_std = np.std(obs_batch, axis=0)
def _apply_normalize_obs(self, obs):
return (obs - self._obs_mean) / (self._obs_std + 1e-8)
def step(self, action):
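        # Map the normalized action from [-1, 1] linearly onto [lb, ub]
        # before handing it to the wrapped environment.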
lb = self._wrapped_env.action_space.low
ub = self._wrapped_env.action_space.high
scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
wrapped_step = self._wrapped_env.step(scaled_action)
next_obs, reward, done, info = wrapped_step
if self._should_normalize:
next_obs = self._apply_normalize_obs(next_obs)
return next_obs, reward * self._reward_scale, done, info
def __str__(self):
return "Normalized: %s" % self._wrapped_env
def domain_to_env(name):
from gym.envs.mujoco import HalfCheetahEnv, \
InvertedPendulumEnv, HumanoidEnv, \
HopperEnv, AntEnv, Walker2dEnv
return {
'invertedpendulum': InvertedPendulumEnv,
'humanoid': HumanoidEnv,
'halfcheetah': HalfCheetahEnv,
'halfcheetah-vel': HalfCheetahVelEnv,
'hopper': HopperEnv,
'hopper-vel': HopperVelEnv,
'ant': AntEnv,
'ant-goal': AntGoalEnv,
'ant-dir': AntDirEnv,
'humanoid-dir':HumanoidDirEnv,
'humanoid-openai-dir': HumanoidDirEnvOpenAI,
'humanoid-ndone-goal': HumanoidGoalEnvNDone,
'walker2d': Walker2dEnv,
'walker-param': Walker2DRandParamsEnv,
}[name]
def domain_to_epoch(name):
return {
'invertedpendulum': 300,
'humanoid': 9000,
'halfcheetah': 5000,
'halfcheetah-vel': 50,
'hopper': 50,
'hopper-vel': 50,
'ant-goal': 590,
'ant-dir': 590,
'ant': 5000,
'humanoid-dir':590,
'humanoid-openai-dir':590,
'humanoid-ndone-goal': 590,
'walker2d': 5000,
'walker-param': 390,
}[name]
def domain_to_num_goals(name):
return {
'halfcheetah-vel': 32,
'hopper-vel': 16,
'ant-goal': 32,
'ant-dir': 32,
'humanoid-dir': 32,
'humanoid-openai-dir': 10,
'humanoid-ndone-goal': 10,
'walker-param': 32,
}[name]
def env_producer(domain, seed, goal=None):
env = domain_to_env(domain)(goal=goal)
env.seed(seed)
env = NormalizedBoxEnv(env)
return env
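
# Example usage (illustrative; assumes the MuJoCo-backed envs above import
# cleanly in this environment):
#   env = env_producer('halfcheetah-vel', seed=0, goal=1.0)
#   obs = env.reset()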
| [
"[email protected]"
] | |
3973203794a335401a2e5cfa6e3206483a4d7116 | d26b3bbf0192cc334e5ac431c753ebcbf2baeb1a | /l10n_cn_hr_payroll/__init__.py | 6adc439b170cc365b31453ea0481a8ba0709b7a9 | [] | no_license | davgit/Xero-2 | 1d566357174d15d4f3b15cc849ce9f32f0c9ef3a | 6477d844fde3f3b8f91d21b15ee7f8986a505de5 | refs/heads/master | 2021-01-21T20:49:47.585328 | 2013-02-16T08:13:22 | 2013-02-16T08:13:22 | 22,778,180 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,072 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2011 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
import l10n_cn_hr_payroll
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| [
"[email protected]"
] | |
07ccbaa13946f30e8d2d81bdcc3c948f8adb3036 | 5eff9df4d276e83c68ce843d58868499858f701a | /Interview/Trees/binary_tree_traversal.py | e5a7ce276633e535f5c96cfc7a75b9b0cfffea65 | [] | no_license | arunraman/Code-Katas | b6723deb00caed58f0c9a1cafdbe807e39e96961 | 7fe3582fa6acf59a2620fe73e1e14bd8635bbee8 | refs/heads/master | 2023-03-04T17:27:44.037145 | 2023-03-02T21:09:53 | 2023-03-02T21:09:53 | 25,232,784 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 851 | py | from binarytree import Node as Treenode
class Solution():
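    # pre-order: visit the root, then the left subtree, then the right subtree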
def preOrder(self, root):
if root == None:
return
print root.value,
self.preOrder(root.left)
self.preOrder(root.right)
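    # in-order: left subtree, then the root, then the right subtree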
def inOrder(self, root):
if root == None:
return
self.inOrder(root.left)
print root.value,
self.inOrder(root.right)
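    # post-order: left subtree, then the right subtree, then the root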
def postOrder(self, root):
if root == None:
return
self.postOrder(root.left)
self.postOrder(root.right)
print root.value,
S = Solution()
root = Treenode(1)
root.left = Treenode(2)
root.right = Treenode(3)
root.left.left = Treenode(8)
root.left.right = Treenode(12)
root.right.left = Treenode(3)
root.right.right = Treenode(25)
print root
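
# The three traversal calls below should print (space-separated):
#   pre-order : 1 2 8 12 3 3 25
#   in-order  : 8 2 12 1 3 3 25
#   post-order: 8 12 2 3 25 3 1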
S.preOrder(root)
print "\n"
S.inOrder(root)
print "\n"
S.postOrder(root) | [
"[email protected]"
] | |
7c522e09e37bfa9cd52933f4b3a202340868c5d4 | 8c95e2185100db97f74d948407f9f6ac563905e5 | /metronotation/routemap.py | 8a6691a352602ddc2fcb031cd4e836d9009a1748 | [
"MIT"
] | permissive | kitao/metro-notation | c5fec21fccba4ef2a21c3294575fd29498ff8ebc | 34a9d2ca9fe17452c8eb5426636484f7cc29c605 | refs/heads/main | 2023-08-20T15:02:04.631092 | 2021-10-30T04:28:17 | 2021-10-30T04:28:17 | 321,700,124 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,037 | py | LAYER_TP = 0
LAYER_MD = 1
LAYER_BT = 2
LAYER_TM = 3
LAYER_BM = 4
LAYER_AL = 5
DIR_UP = (0, -1)
DIR_DN = (0, 1)
DIR_LT = (-1, 0)
DIR_RT = (1, 0)
DIR_LU = (-1, -1)
DIR_RD = (1, 1)
LETTER_TABLE = [
("R", (LAYER_TP, DIR_UP, 1)),
("M", (LAYER_MD, DIR_DN, 1)),
("L", (LAYER_BT, DIR_DN, 1)),
("U", (LAYER_TP, DIR_RT, 1)),
("E", (LAYER_MD, DIR_LT, 1)),
("D", (LAYER_BT, DIR_LT, 1)),
("F", (LAYER_TP, DIR_RD, 1)),
("S", (LAYER_MD, DIR_RD, 1)),
("B", (LAYER_BT, DIR_LU, 1)),
#
("R2", (LAYER_TP, DIR_UP, 2)),
("M2", (LAYER_MD, DIR_DN, 2)),
("L2", (LAYER_BT, DIR_DN, 2)),
("U2", (LAYER_TP, DIR_RT, 2)),
("E2", (LAYER_MD, DIR_LT, 2)),
("D2", (LAYER_BT, DIR_LT, 2)),
("F2", (LAYER_TP, DIR_RD, 2)),
("S2", (LAYER_MD, DIR_RD, 2)),
("B2", (LAYER_BT, DIR_LU, 2)),
#
("R'", (LAYER_TP, DIR_DN, 1)),
("M'", (LAYER_MD, DIR_UP, 1)),
("L'", (LAYER_BT, DIR_UP, 1)),
("U'", (LAYER_TP, DIR_LT, 1)),
("E'", (LAYER_MD, DIR_RT, 1)),
("D'", (LAYER_BT, DIR_RT, 1)),
("F'", (LAYER_TP, DIR_LU, 1)),
("S'", (LAYER_MD, DIR_LU, 1)),
("B'", (LAYER_BT, DIR_RD, 1)),
#
("R2'", (LAYER_TP, DIR_DN, 2)),
("M2'", (LAYER_MD, DIR_UP, 2)),
("L2'", (LAYER_BT, DIR_UP, 2)),
("U2'", (LAYER_TP, DIR_LT, 2)),
("E2'", (LAYER_MD, DIR_RT, 2)),
("D2'", (LAYER_BT, DIR_RT, 2)),
("F2'", (LAYER_TP, DIR_LU, 2)),
("S2'", (LAYER_MD, DIR_LU, 2)),
("B2'", (LAYER_BT, DIR_RD, 2)),
#
("Rw", (LAYER_TM, DIR_UP, 1)),
("Lw", (LAYER_BM, DIR_DN, 1)),
("Uw", (LAYER_TM, DIR_RT, 1)),
("Dw", (LAYER_BM, DIR_LT, 1)),
("Fw", (LAYER_TM, DIR_RD, 1)),
("Bw", (LAYER_BM, DIR_LU, 1)),
#
("Rw2", (LAYER_TM, DIR_UP, 2)),
("Lw2", (LAYER_BM, DIR_DN, 2)),
("Uw2", (LAYER_TM, DIR_RT, 2)),
("Dw2", (LAYER_BM, DIR_LT, 2)),
("Fw2", (LAYER_TM, DIR_RD, 2)),
("Bw2", (LAYER_BM, DIR_LU, 2)),
#
("Rw'", (LAYER_TM, DIR_DN, 1)),
("Lw'", (LAYER_BM, DIR_UP, 1)),
("Uw'", (LAYER_TM, DIR_LT, 1)),
("Dw'", (LAYER_BM, DIR_RT, 1)),
("Fw'", (LAYER_TM, DIR_LU, 1)),
("Bw'", (LAYER_BM, DIR_RD, 1)),
#
("Rw2'", (LAYER_TM, DIR_DN, 2)),
("Lw2'", (LAYER_BM, DIR_UP, 2)),
("Uw2'", (LAYER_TM, DIR_LT, 2)),
("Dw2'", (LAYER_BM, DIR_RT, 2)),
("Fw2'", (LAYER_TM, DIR_LU, 2)),
("Bw2'", (LAYER_BM, DIR_RD, 2)),
#
("x", (LAYER_AL, DIR_UP, 1)),
("x'", (LAYER_AL, DIR_DN, 1)),
("y", (LAYER_AL, DIR_RT, 1)),
("y'", (LAYER_AL, DIR_LT, 1)),
("z", (LAYER_AL, DIR_RD, 1)),
("z'", (LAYER_AL, DIR_LU, 1)),
]
LETTER_TABLE.sort(key=lambda x: len(x[0]), reverse=True)
CUBE_RF = 0
CUBE_OF = 1
CUBE_BF = 2
CUBE_GF = 3
CUBE_WF = 4
CUBE_YF = 5
CUBE_RB = 6
CUBE_OB = 7
CUBE_BB = 8
CUBE_GB = 9
CUBE_WB = 10
CUBE_YB = 11
CUBE_TABLE = {
"R": CUBE_RF,
"O": CUBE_OF,
"B": CUBE_BF,
"G": CUBE_GF,
"W": CUBE_WF,
"Y": CUBE_YF,
"r": CUBE_RB,
"o": CUBE_OB,
"b": CUBE_BB,
"g": CUBE_GB,
"w": CUBE_WB,
"y": CUBE_YB,
}
class Node:
def __init__(self, letters, layer, direction, distance):
self.letters = letters
self.layer = layer
self.direction = direction
self.distance = distance
self.is_start_hit = False
self.is_end_hit = False
    @staticmethod
    def from_letters(letters):
for l, n in LETTER_TABLE:
if letters.startswith(l):
return Node(l, *n), letters[len(l) :]
raise ValueError
class Route:
def __init__(self, nodes):
x = y = 0
min_x = min_y = 0
max_x = max_y = 0
route_count = {(0, 0): 1}
last_direction = (0, 0)
last_layer = -1
for node in nodes:
if (
node.direction == last_direction
and node.layer == last_layer
or node.direction[0] + last_direction[0] == 0
and node.direction[1] + last_direction[1] == 0
):
raise ValueError
last_direction = node.direction
last_layer = node.layer
for i in range(node.distance):
x += node.direction[0]
y += node.direction[1]
min_x = min(x, min_x)
min_y = min(y, min_y)
max_x = max(x, max_x)
max_y = max(y, max_y)
if (x, y) in route_count:
route_count[(x, y)] += 1
else:
route_count[(x, y)] = 1
for pos, count in route_count.items():
if count >= 3 or count >= 2 and pos != (0, 0) and pos != (x, y):
raise ValueError
self.nodes = nodes
self.width = max_x - min_x
self.height = max_y - min_y
self.start_x = -min_x
self.start_y = -min_y
nodes[0].is_start_hit = route_count[(0, 0)] > 1
nodes[-1].is_end_hit = route_count[(x, y)] > 1
    @staticmethod
    def from_letters(letters):
try:
nodes = []
rest = letters
while rest:
node, rest = Node.from_letters(rest)
nodes.append(node)
route = Route(nodes)
except ValueError:
raise ValueError(letters)
return route
class RouteMap:
def __init__(self, name, cube, routes):
self.name = name
self.cube = cube
self.routes = routes
self.width = sum([route.width for route in routes])
self.height = max([route.height for route in routes])
for route in routes:
route.start_y += (self.height - route.height) / 2
    @staticmethod
    def from_letters(name, cube, letters):
if not cube:
cube = "w" * 21
elif len(cube) != 21:
raise ValueError(cube)
try:
cube = [CUBE_TABLE[c] for c in cube]
except KeyError:
raise ValueError(cube)
name = name or "no name"
routes = [Route.from_letters(l) for l in letters.split()]
return RouteMap(name, cube, routes)
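# Usage sketch: parse the "sexy move" R U R' U' as a single route; a falsy
# cube string falls back to the all-white default handled above.
# rm = RouteMap.from_letters("sexy move", "", "RUR'U'")
# rm.width, rm.height  # -> bounding box of the traced route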
| [
"[email protected]"
] | |
d8886e88937323eb625f4951e4a73b8b82235212 | 8a42be3f930d8a215394a96ad2e91c95c3b7ff86 | /Build/Instalation/GeneralDb/Marathon/MarathonTests_1.1/LargeFile_Edit/TestCases/V65_Changes/Diff_TwoLayouts1.py | f446d02edef1afedb4cb381315227b3bf6fde9a1 | [] | no_license | java-tools/jrec | 742e741418c987baa4350390d126d74c0d7c4689 | 9ece143cdd52832804eca6f3fb4a1490e2a6f891 | refs/heads/master | 2021-09-27T19:24:11.979955 | 2017-11-18T06:35:31 | 2017-11-18T06:35:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,811 | py | useFixture(default)
def test():
from Modules import commonBits
java_recorded_version = '1.6.0_03'
if window('Record Editor'):
commonBits.selectOldFilemenu(select_menu, 'Utilities', 'Compare Menu')
click('*2')
click('Choose File')
if window('Open'):
select(commonBits.selectPane(), 'Ams_LocDownload_20041228_Extract.txt')
click('Open')
close()
commonBits.setRecordLayout(select, 'ams Store')
click('Right')
select('TabbedPane', '')
click('Choose File')
if window('Open'):
select(commonBits.selectPane(), 'Ams_LocDownload_20041228_Extract2.txt')
click('Open')
close()
commonBits.setRecordLayout(select, 'ams Store')
click('Right')
select('TabbedPane', '')
click('Right')
select('TabbedPane', '')
click('Compare')
select('Table', 'cell:Loc Name,11(Highpoint City)')
assert_p('Table', 'Text', 'St Marys', 'Loc Name,12')
select('Table', 'cell:Loc Name,14(Bass Hill)')
assert_p('Table', 'Content', '[[, , , , , , , , , , , , ], [, Inserted, 1, TAR, 5839, DC, DC - Taras Ave, , 30-68 Taras Ave, Altona North, 3025, VIC, A], [, , , , , , , , , , , , ], [, Inserted, 2, TAR, 5850, DC, VIC West Ad Support, , Lot 2 Little Boundary Rd, Laverton, 3028, VIC, A], [, Old, 4, TAR, 5035, ST, Rockdale, Building B, Portside DC, 2-8 Mc Pherson Street, Botany, 2019, NSW, A], [, New, 6, , 5096, , Canberra Civic, Target Canberra, Canberra City Centre, Akuna Ave, Canberra, 2601, ACT, ], [, Old, 5, TAR, 5037, ST, Miranda, Westfield Shoppingtown, Cnr. Urunga Pde & The Kingsway, Miranda, 2228, NSW, A], [, New, 7, , 5012, , Ringwood, Ringwood, Seymour Street, Ringwood, 3134, VIC, ], [, Old, 6, TAR, 5052, ST, Eastwood, Marayong Offsite Reserve, 11 Melissa Place, Marayong, 2148, NSW, A], [, New, 8, , 5030, , Epping, Epping Plaza Shopping Centre, Cnr. High & Cooper Streets, Epping, 3076, VIC, ], [, Old, 7, TAR, 5055, ST, Leichhardt, Marketown, Marion Street, Leichhardt, 2040, NSW, A], [, New, 9, , 5054, , Highpoint City, Laverton, Lot 2, Cnr Lt Boundry & Old Geelong Road, Laverton, 3028, VIC, ], [, Old, 8, TAR, 5060, ST, St Marys, St. Mary\'s, Charles Hackett Drive, St Mary\'s, 2760, NSW, A], [, New, 10, , 5062, , Castletown, Townsville, Cnr. Woolcock St. & Kings Road, Townsville, 4810, QLD, ], [, Old, 9, TAR, 5070, ST, Bass Hill, Bass Hill Plaza, 753 Hume Highway, Bass Hill, 2197, NSW, A], [, New, 11, , 5138, , Cairns Central, Cairns, Cnr. McLeod & Aplin Streets, Cairns, 4870, QLD, ], [, Old, 10, TAR, 5074, ST, Campbelltown, Campbelltown Mall, 303 Queen Street, Campbelltown, 2560, NSW, A], [, New, 12, , 5141, , The Willows, Thuringowa Central, Cnr Thuringowa Drive & Range Rd, Thuringowa Central, 4817, QLD, ], [, Old, 11, TAR, 5078, ST, Warringah Mall, Frenchs Forest, Units 2-3, 14 Aquatic Drive, Frenchs Forest, 2086, NSW, A], [, New, 13, , 5146, , Palmerston, Palmerston Shopping Centre, Temple Terrace, Palmerston, 0830, NT, ], [, Old, 12, TAR, 5081, ST, Ashfield, Ashfield Mall, Knox Street, Ashfield, 2131, NSW, A], [, New, 14, , 5002, , Coffs Harbour, Coffs Harbour, Cnr. Park Beach Road & Pacific Hwy, Coffs Harbour, 2450, , ], [, Old, 13, TAR, 5085, ST, Roselands, Condell park, Unit 2, 39-41 Allingham Street, Condell Park, 2200, NSW, A], [, New, 15, , 5966, DC, Huntingwood DC, Huntingwood DC, 35 Huntingwood Drive, Huntingwood, 2148, , ], [, , , , , , , , , , , , ], [, Inserted, 16, TAR, 5967, DC, Hendra DC, Hendra DC, Cnr Headly Ave & Nudgee Road, Hendra, 4011, QLD, A], [, , , , , , , , , , , , ], [, Inserted, 17, TAR, 5968, DC, Beverly DC, Beverly DC, 117 Main Street, Beverly, 5009, SA, A]]')
select('Table', 'cell:Loc Name,14(Bass Hill)')
click('All Included Lines')
select('Table', 'cell:Loc Addr Ln1,8(Marayong)')
assert_p('Table', 'Content', '[[, , , , , , , , , , , , ], [, Inserted, 1, TAR, 5839, DC, DC - Taras Ave, , 30-68 Taras Ave, Altona North, 3025, VIC, A], [, , , , , , , , , , , , ], [, Inserted, 2, TAR, 5850, DC, VIC West Ad Support, , Lot 2 Little Boundary Rd, Laverton, 3028, VIC, A], [, Old, 1, TAR, 5015, ST, Bankstown, Bankstown, Unit 2, 39-41 Allingham Street, Condell Park, 2200, NSW, A], [, New, 3, , , , , , , , , , ], [, Old, 2, TAR, 5019, ST, Penrith, Penrith, 58 Leland Street, Penrith, 2750, NSW, A], [, New, 4, , , , , , , , , , ], [, Old, 3, TAR, 5033, ST, Blacktown, Marayong, Dock 2, 11 Melissa Place, Marayong, 2148, NSW, A], [, New, 5, , , , , , , , , , ], [, Old, 4, TAR, 5035, ST, Rockdale, Building B, Portside DC, 2-8 Mc Pherson Street, Botany, 2019, NSW, A], [, New, 6, , 5096, , Canberra Civic, Target Canberra, Canberra City Centre, Akuna Ave, Canberra, 2601, ACT, ], [, Old, 5, TAR, 5037, ST, Miranda, Westfield Shoppingtown, Cnr. Urunga Pde & The Kingsway, Miranda, 2228, NSW, A], [, New, 7, , 5012, , Ringwood, Ringwood, Seymour Street, Ringwood, 3134, VIC, ], [, Old, 6, TAR, 5052, ST, Eastwood, Marayong Offsite Reserve, 11 Melissa Place, Marayong, 2148, NSW, A], [, New, 8, , 5030, , Epping, Epping Plaza Shopping Centre, Cnr. High & Cooper Streets, Epping, 3076, VIC, ], [, Old, 7, TAR, 5055, ST, Leichhardt, Marketown, Marion Street, Leichhardt, 2040, NSW, A], [, New, 9, , 5054, , Highpoint City, Laverton, Lot 2, Cnr Lt Boundry & Old Geelong Road, Laverton, 3028, VIC, ], [, Old, 8, TAR, 5060, ST, St Marys, St. Mary\'s, Charles Hackett Drive, St Mary\'s, 2760, NSW, A], [, New, 10, , 5062, , Castletown, Townsville, Cnr. Woolcock St. & Kings Road, Townsville, 4810, QLD, ], [, Old, 9, TAR, 5070, ST, Bass Hill, Bass Hill Plaza, 753 Hume Highway, Bass Hill, 2197, NSW, A], [, New, 11, , 5138, , Cairns Central, Cairns, Cnr. McLeod & Aplin Streets, Cairns, 4870, QLD, ], [, Old, 10, TAR, 5074, ST, Campbelltown, Campbelltown Mall, 303 Queen Street, Campbelltown, 2560, NSW, A], [, New, 12, , 5141, , The Willows, Thuringowa Central, Cnr Thuringowa Drive & Range Rd, Thuringowa Central, 4817, QLD, ], [, Old, 11, TAR, 5078, ST, Warringah Mall, Frenchs Forest, Units 2-3, 14 Aquatic Drive, Frenchs Forest, 2086, NSW, A], [, New, 13, , 5146, , Palmerston, Palmerston Shopping Centre, Temple Terrace, Palmerston, 0830, NT, ], [, Old, 12, TAR, 5081, ST, Ashfield, Ashfield Mall, Knox Street, Ashfield, 2131, NSW, A], [, New, 14, , 5002, , Coffs Harbour, Coffs Harbour, Cnr. Park Beach Road & Pacific Hwy, Coffs Harbour, 2450, , ], [, Old, 13, TAR, 5085, ST, Roselands, Condell park, Unit 2, 39-41 Allingham Street, Condell Park, 2200, NSW, A], [, New, 15, , 5966, DC, Huntingwood DC, Huntingwood DC, 35 Huntingwood Drive, Huntingwood, 2148, , ], [, , , , , , , , , , , , ], [, Inserted, 16, TAR, 5967, DC, Hendra DC, Hendra DC, Cnr Headly Ave & Nudgee Road, Hendra, 4011, QLD, A], [, , , , , , , , , , , , ], [, Inserted, 17, TAR, 5968, DC, Beverly DC, Beverly DC, 117 Main Street, Beverly, 5009, SA, A]]')
select('Table', 'cell:Loc Addr Ln1,8(Marayong)')
close()
| [
"bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec"
] | bruce_a_martin@b856f413-25aa-4700-8b60-b3441822b2ec |
4f5f6cf6b975bc75e55183392098c5035bdaf30d | a742bd051641865d2e5b5d299c6bc14ddad47f22 | /algorithm/牛客网/55-链表中环的入口节点.py | cb9f7c566cc7b629c3e7d7a7aef88c03f3a1a921 | [] | no_license | lxconfig/UbuntuCode_bak | fb8f9fae7c42cf6d984bf8231604ccec309fb604 | 3508e1ce089131b19603c3206aab4cf43023bb19 | refs/heads/master | 2023-02-03T19:10:32.001740 | 2020-12-19T07:27:57 | 2020-12-19T07:27:57 | 321,351,481 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,604 | py |
"""
Given a linked list, if it contains a cycle, find the entry node of the
cycle; otherwise return null.
Approach (two pointers):
    The fast pointer moves two steps per iteration, the slow pointer one.
    If they meet, the list has a cycle, though the meeting node is not
    necessarily the entry node. Reset the slow pointer to the head, then
    advance both one step at a time -- the node where they meet again is
    the cycle's entry node. If the fast pointer runs off the end, no cycle.
Alternative:
    First advance the fast pointer n steps, where n is the number of nodes
    in the cycle, then move both pointers together until they meet; the
    meeting point is the entry node.
"""
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def EntryNodeOfLoop(self, pHead):
        # online-judge stats: runtime 22 ms, memory 5864 KB
if not pHead:
return None
fast = slow = pHead
while fast and fast.next:
fast = fast.next.next
slow = slow.next
if fast == slow:
break
if not fast or not fast.next:
return None
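        # Why the reset works: with a = head->entry and b = entry->meeting
        # distances and L = cycle length, fast travelled 2(a+b) = (a+b) + k*L,
        # so a = k*L - b: walking a steps from the head and from the meeting
        # point lands both pointers on the entry node at the same time.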
slow = pHead
while fast != slow:
fast = fast.next
slow = slow.next
return fast.val
if __name__ == "__main__":
solution = Solution()
a = ListNode(1)
b = ListNode(2)
c = ListNode(3)
d = ListNode(4)
e = ListNode(5)
f = ListNode(6)
    a.next = b
b.next = c
c.next = d
d.next = e
e.next = c
# f.next = d
print(solution.EntryNodeOfLoop(a)) | [
"[email protected]"
] | |
0c602c4d5aba8185e74b266e1050df2cd0ec026c | 111082d7fd02a5f64cd1784b923a109cc95dc557 | /dj_rulitool/wsgi.py | 7ac1fe5349618d888f4dc721c11e7dfd58b406e3 | [] | no_license | 270466585/dj_rulitool | ba65a6ef1bc44b599f19ac1172d86e8d4b2a12af | 0d2e97454c66d30537780d81b2a0b4b2f953b2ed | refs/heads/master | 2020-04-14T20:25:36.592762 | 2019-01-04T10:28:18 | 2019-01-04T10:28:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | py | """
WSGI config for dj_rulitool project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "dj_rulitool.settings")
application = get_wsgi_application()
| [
"[email protected]"
] | |
5224d8389d28f53149bb9a84556ad05b34511670 | 32711a21edff968fdbf9fa9baf0e0f8373d0e131 | /authapp/forms.py | fa7e7e88283332593f25f77dbbd8f2f33b5d24c6 | [] | no_license | acid-n/GeekShop | ca836a4daeb97754fafd44d36e705f0e160c8d4d | 9749debe92e6ded46ed01082fbdb497a5f8485fa | refs/heads/master | 2023-01-15T15:29:18.172547 | 2020-11-25T18:34:39 | 2020-11-25T18:34:39 | 296,569,582 | 0 | 0 | null | 2020-10-04T18:39:29 | 2020-09-18T09:02:03 | JavaScript | UTF-8 | Python | false | false | 2,620 | py | import hashlib
import random
from django import forms
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm, UserChangeForm
from authapp.models import ShopUser, ShopUserProfile
class ShopUserLoginForm(AuthenticationForm):
class Meta:
model = ShopUser
fields = ('username', 'password')
def __init__(self, *args, **kwargs):
super(ShopUserLoginForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = "form-control"
field.help_text = ''
class ShopUserRegisterForm(UserCreationForm):
class Meta:
model = ShopUser
fields = ('username', 'first_name', 'password1', 'password2', 'email', 'avatar', 'age')
def __init__(self, *args, **kwargs):
super(ShopUserRegisterForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = "form-control"
field.help_text = ''
def clean_age(self):
data = self.cleaned_data['age']
if data < 18:
raise forms.ValidationError("Вы слишком молоды")
return data
def save(self, **kwargs):
user = super(ShopUserRegisterForm, self).save()
user.is_active = False
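        # New accounts stay inactive until confirmed; the activation key below
        # is a salted SHA-1 of the user's email, presumably checked via a link
        # sent by email elsewhere in the app.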
salt = hashlib.sha1(str(random.random()).encode('utf8')).hexdigest()[:6]
user.activation_key = hashlib.sha1((user.email + salt).encode('utf8')).hexdigest()
user.save()
return user
class ShopUserEditForm(UserChangeForm):
class Meta:
model = ShopUser
fields = ('username', 'first_name', 'email', 'avatar', 'age')
def __init__(self, *args, **kwargs):
super(ShopUserEditForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = "form-control"
field.help_text = ''
if field_name == 'password':
field.widget = forms.HiddenInput()
def clean_age(self):
data = self.cleaned_data['age']
if data < 18:
raise forms.ValidationError("Вы слишком молоды")
return data
class ShopUserProfileEditForm(forms.ModelForm):
class Meta:
model = ShopUserProfile
fields = ('tagline', 'about_me', 'gender')
def __init__(self, *args, **kwargs):
super(ShopUserProfileEditForm, self).__init__(*args, **kwargs)
for field_name, field in self.fields.items():
field.widget.attrs['class'] = "form-control"
field.help_text = '' | [
"[email protected]"
] | |
6ac6c0894bfa4d2f46e20bd466eb57471523bfb5 | ed78041a12c60e46bb0c4d347c47536e84307a96 | /app/__init__.py | ea59f07f74501ccfe1fe861e921ef187326337da | [] | no_license | Garfield247/news_nlp | 4875842af4249def6ffdc65a6e5896b02610dd8d | e18d178824ea9bf11d3895c58037a211f4b21cb6 | refs/heads/master | 2022-12-11T21:00:36.967826 | 2019-03-15T02:32:46 | 2019-03-15T02:32:46 | 161,159,779 | 0 | 0 | null | 2022-12-08T04:52:15 | 2018-12-10T10:48:41 | JavaScript | UTF-8 | Python | false | false | 945 | py | # -*- coding: utf-8 -*-
from flask import Flask, render_template
from app.config import config
from app.extensions import config_extensions
from app.views import config_blueprint
# A factory function dedicated to creating the Flask application instance
def create_app(config_name):  # e.g. 'development'
    # Create the application instance
    app = Flask(__name__)
    # Load the configuration for the requested environment
    app.config.from_object(config.get(config_name) or config['default'])
    # Run the chosen configuration's own init hook
    config[config_name].init_app(app)
    # Set up extensions
    config_extensions(app)
    # Register blueprints
    config_blueprint(app)
    # Register custom error pages
    config_errorhandler(app)
    # Return the application instance
    return app
def config_errorhandler(app):
    # A handler registered on a blueprint only applies to that blueprint's
    # errors; use app_errorhandler to customise error pages application-wide.
    @app.errorhandler(404)
    def page_not_found(e):
        return render_template('errors/404.html')
| [
"[email protected]"
] | |
09997d079fdba85719df5fe4ccf2d3f6d5988d74 | 0e9789668dcfeeedacf78aa9917bb95ec9a5f763 | /preprocessing/load_data.py | 5ff6f999bcc4fb4aae3d0baad46dc27ccc9be878 | [] | no_license | mma1979/Simple-Sentence-Similarity | 76151619bcdfd39054f8b6cbe1e26af99d0f6a37 | dfacb34c325df771056f34f85c7927148d69691c | refs/heads/master | 2022-04-11T00:15:07.415752 | 2020-01-28T13:06:42 | 2020-01-28T13:06:42 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,926 | py | import os
import pandas as pd
import requests
import tensorflow as tf
def load_sts_dataset(filename):
"""
Loads a subset of the STS dataset into a DataFrame.
In particular both sentences and their human rated similarity score.
:param filename:
:return:
"""
sent_pairs = []
with tf.gfile.GFile(filename, "r") as f:
for line in f:
ts = line.strip().split("\t")
sent_pairs.append((ts[5], ts[6], float(ts[4])))
return pd.DataFrame(sent_pairs, columns=["sent_1", "sent_2", "sim"])
def download_and_load_sts_data():
sts_dataset = tf.keras.utils.get_file(
fname="Stsbenchmark.tar.gz",
origin="http://ixa2.si.ehu.es/stswiki/images/4/48/Stsbenchmark.tar.gz",
extract=True)
sts_dev = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-dev.csv"))
sts_test = load_sts_dataset(os.path.join(os.path.dirname(sts_dataset), "stsbenchmark", "sts-test.csv"))
return sts_dev, sts_test
def download_sick_dataset(url):
response = requests.get(url).text
lines = response.split("\n")[1:]
lines = [l.split("\t") for l in lines if len(l) > 0]
lines = [l for l in lines if len(l) == 5]
df = pd.DataFrame(lines, columns=["idx", "sent_1", "sent_2", "sim", "label"])
df['sim'] = pd.to_numeric(df['sim'])
return df
def download_and_load_sick_dataset():
sick_train = download_sick_dataset(
"https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_train.txt")
sick_dev = download_sick_dataset(
"https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_trial.txt")
sick_test = download_sick_dataset(
"https://raw.githubusercontent.com/alvations/stasis/master/SICK-data/SICK_test_annotated.txt")
sick_all = sick_train.append(sick_test).append(sick_dev)
return sick_all, sick_train, sick_test, sick_dev
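# Usage sketch (requires network access; downloads land in the Keras cache dir):
# sts_dev, sts_test = download_and_load_sts_data()
# sick_all, sick_train, sick_test, sick_dev = download_and_load_sick_dataset()
# print(sts_dev[['sent_1', 'sent_2', 'sim']].head())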
| [
"[email protected]"
] | |
cfe30dfb145e5c7610d9b424ad9cb71f37e95724 | 09e63e204cf3f70b0f878fe237f231af0786611e | /LifeQA/LSTM_QA.py | 2e7249b817077a5418b4be8df812dcb9c1c1f866 | [] | no_license | shubham14/Machine_learning_research | 8f00788366abf2d330afe8914e48d4279fcd8aea | b134e4e6b1e6c110fad8cb38b033c92c34d3c8ce | refs/heads/master | 2022-11-08T13:24:58.722027 | 2019-11-10T09:21:28 | 2019-11-10T09:21:28 | 132,386,307 | 3 | 2 | null | 2022-10-17T15:36:25 | 2018-05-07T00:16:38 | Python | UTF-8 | Python | false | false | 3,186 | py | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 16 16:55:06 2018
@author: Shubham
"""
import numpy as np
from keras import backend as K
from keras.layers import Embedding
from keras.layers import LSTM, Input, merge, Lambda
from keras.layers.wrappers import Bidirectional
from keras.layers.convolutional import Convolution1D
from keras.models import Model
class QAModel:  # renamed: the original `class Model` shadowed keras.models.Model used below
    def __init__(self, margin, enc_timesteps, dec_timesteps,
                 hidden_dim, embedding_file, vocab_size):
self.margin = margin
self.enc_timesteps = enc_timesteps
self.dec_timesteps = dec_timesteps
self.hidden_dim = hidden_dim
self.embedding_file = embedding_file
self.vocab_size = vocab_size
def cosine_similarity(self):
dot = lambda a, b: K.batch_dot(a, b, axes=1)
return lambda x: dot(x[0], x[1]) / K.maximum(K.sqrt(dot(x[0], x[0]) * dot(x[1], x[1])), K.epsilon())
def build_model(self):
# initialize the question and answer shapes and datatype
question = Input(shape=(self.enc_timesteps,), dtype='int32', name='question_base')
answer = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer')
answer_good = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer_good_base')
answer_bad = Input(shape=(self.dec_timesteps,), dtype='int32', name='answer_bad_base')
weights = np.load(self.embedding_file)
qa_embedding = Embedding(input_dim=self.vocab_size,
output_dim=weights.shape[1],mask_zero=True,weights=[weights])
bi_lstm = Bidirectional(LSTM(activation='tanh', dropout=0.2, units=self.hidden_dim,
return_sequences=False))
# embed the question and pass it through bilstm
question_embedding = qa_embedding(question)
question_enc_1 = bi_lstm(question_embedding)
# embed the answer and pass it through bilstm
answer_embedding = qa_embedding(answer)
answer_enc_1 = bi_lstm(answer_embedding)
# get the cosine similarity
        similarity = self.cosine_similarity()
question_answer_merged = merge(inputs=[question_enc_1, answer_enc_1], mode=similarity, output_shape=lambda _: (None, 1))
lstm_model = Model(name="bi_lstm", inputs=[question, answer], outputs=question_answer_merged)
good_similarity = lstm_model([question, answer_good])
bad_similarity = lstm_model([question, answer_bad])
loss = merge(
[good_similarity, bad_similarity],
            mode=lambda x: K.relu(self.margin - x[0] + x[1]),
output_shape=lambda x: x[0])
training_model = Model(inputs=[question, answer_good, answer_bad], outputs=loss, name='training_model')
training_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer="rmsprop")
prediction_model = Model(inputs=[question, answer_good], outputs=good_similarity, name='prediction_model')
prediction_model.compile(loss=lambda y_true, y_pred: y_pred, optimizer="rmsprop")
return training_model, prediction_model
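# Usage sketch (hyperparameter values and the embedding file name are
# illustrative assumptions, not taken from this repo):
# qa = QAModel(margin=0.2, enc_timesteps=150, dec_timesteps=150,
#              hidden_dim=128, embedding_file='word_vectors.npy', vocab_size=50000)
# training_model, prediction_model = qa.build_model()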
| [
"[email protected]"
] | |
a5c8acc3f261fc484e471a9c6729ba0a2951f7ea | 6cc37dfc44880f57823bb9523ea5f8206d5e3f22 | /python_OOP/labs_and_homeworks/09_decorators_exercise/07_execution_time.py | 672ebddef602603926ee47bec252adbc7b08d114 | [] | no_license | dimitar-daskalov/SoftUni-Courses | 70d265936fd86712a7bfe0586ec6ebd1c7384f77 | 2054bc58ffb5f41ed86f5d7c98729b101c3b1368 | refs/heads/main | 2023-05-31T06:44:35.498399 | 2021-07-11T10:16:08 | 2021-07-11T10:16:08 | 322,896,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | import time
def exec_time(func):
    def wrapper(*args):
        start = time.time()
        func(*args)  # the wrapped function's own result is deliberately discarded
        end = time.time()
        time_spent = end - start
        return time_spent  # the decorated call reports elapsed seconds instead
return wrapper
@exec_time
def loop(start, end):
total = 0
for x in range(start, end):
total += x
return total
print(loop(1, 10000000))
@exec_time
def concatenate(strings):
result = ""
for string in strings:
result += string
return result
print(concatenate(["a" for i in range(1000000)]))
| [
"[email protected]"
] | |
488718466f0f0e87ffa34be480e9e92c0c8df57a | 9a701c23ef6e70dc3704f012ffbb1e2689f7a8cb | /Lib/zDogPy/box.py | 5006b0c8963abce56336c69e361803f02212a395 | [
"MIT"
] | permissive | gferreira/zdogpy | a832db713524d1343b85de1c8215511f438a2e41 | 41304e5db7cc2e145d43b6b2f7d77d25ec3c8b08 | refs/heads/master | 2020-05-30T07:50:24.621323 | 2019-09-11T09:30:59 | 2019-09-11T09:30:59 | 189,606,401 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,196 | py | '''Box composite shape'''
from importlib import reload
import zDogPy.anchor
reload(zDogPy.anchor)
import zDogPy.shape
reload(zDogPy.shape)
import zDogPy.rect
reload(zDogPy.rect)
from zDogPy.boilerplate import hexToRGB, TAU
from zDogPy.anchor import Anchor
from zDogPy.shape import Shape
from zDogPy.rect import Rect
# -------
# BoxRect
# -------
class BoxRect(Rect):
def copyGraph(self):
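        # Box manages its own face rects, so graph copying is a deliberate
        # no-op (mirroring the zdog Box implementation this port follows).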
pass
# ---
# Box
# ---
class Box(Anchor):
frontFace = None
rearFace = None
leftFace = None
rightFace = None
topFace = None
bottomFace = None
def __init__(self, width=1, height=1, depth=1, stroke=1, fill=True, color=True, frontFace=True, rearFace=True, leftFace=True, rightFace=True, topFace=True, bottomFace=True, **kwargs):
self.width = width
self.height = height
self.depth = depth
self.stroke = stroke
self.fill = fill
self.color = color
self.frontFace = frontFace
self.rearFace = rearFace
self.leftFace = leftFace
self.rightFace = rightFace
self.topFace = topFace
self.bottomFace = bottomFace
Anchor.__init__(self, **kwargs)
self.updatePath()
def updatePath(self):
self.setFace('frontFace', {
'width' : self.width,
'height' : self.height,
'translate' : { 'z': self.depth / 2 },
})
self.setFace('rearFace', {
'width' : self.width,
'height' : self.height,
'translate' : { 'z': -self.depth / 2 },
})
self.setFace('leftFace', {
'width' : self.depth,
'height' : self.height,
'translate' : { 'x': -self.width / 2 },
'rotate' : { 'y': -TAU / 4 },
})
self.setFace('rightFace', {
'width' : self.depth,
'height' : self.height,
'translate' : { 'x': self.width / 2 },
'rotate' : { 'y': TAU / 4 },
})
self.setFace('topFace', {
'width' : self.width,
'height' : self.depth,
'translate' : { 'y': -self.height / 2 },
'rotate' : { 'x': -TAU / 4 },
})
self.setFace('bottomFace', {
'width' : self.width,
'height' : self.depth,
'translate' : { 'y': self.height / 2 },
'rotate' : { 'x': -TAU / 4 },
})
def setFace(self, faceName, options):
attr = getattr(self, faceName)
rectProperty = faceName + 'Rect'
# remove if False (??)
if not attr:
# self.removeChild(rectProperty)
return
if isinstance(attr, tuple):
color = attr
elif type(attr) is str:
color = hexToRGB(attr)
else:
color = self.color
rect = BoxRect(**options)
rect.stroke = self.stroke
rect.fill = self.fill
rect.color = color
# rect.backface = self.backface
# rect.front = self.front
# rect.visible = self.visible
rect.updatePath()
self.addChild(rect)
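# Minimal usage sketch (argument values are illustrative; face colors may be
# RGB tuples or hex strings, as handled by setFace above):
# box = Box(width=100, height=80, depth=60, stroke=2,
#           color=(1, 0, 0), leftFace='#636', rightFace='#C25')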
| [
"[email protected]"
] | |
a4536f0fe2f8a612a01725277078ce3b79778683 | 5707a6e95d6388a320416d7c06c275daf61e3406 | /Unidad2/ej1.py | 54e6a486f22b1ad338b6cbaf6b579ebbc1bebc68 | [] | no_license | hectorrdz98/lenguajes-y-automatas-1 | ed3406e8a7b7eaad489c530146cddac5a972bc81 | 0004c4696a92cdd33a86a24f82d0f9b7e01e455c | refs/heads/master | 2022-01-21T21:24:21.819330 | 2019-05-29T12:42:42 | 2019-05-29T12:42:42 | 167,203,004 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,084 | py |
"""
Author: Hector Rodriguez
"""
"""
This script reads doc.txt (it must sit at the same folder level as this file)
and prints to the console, for each line of the document, which kind of
element it is, i.e. which category it belongs to.
These are my conditions:
1.- entero (integer): digits 0-9
2.- flotante (float): digits 0-9 followed by a . and more digits 0-9
3.- variable: letters A-Z (upper or lower case), _ and digits 0-9; must not start with 0-9
4.- string: a sequence of characters that opens and closes with "
5.- aritmetica (arithmetic): an integer, float or variable followed by * + - / ^ and then another integer, float or variable;
    two consecutive * + - / ^ are not allowed, nor may the expression end with * + - / ^
6.- relacional (relational): an integer, float or variable followed by < > (optionally =), or != or ==, and then another integer, float or variable;
    two consecutive relational operators are not allowed, nor may the expression end with one
"""
import re
# Required regular expressions
RegexPatterns = {
'entero': r'^[\-|\+]?\d+$',
'flotante': r'^[\-|\+]?\d+\.\d+$',
'variable': r'^[a-zA-Z_]\w{0,29}$',
'string': r'^\"[^\"]*\"$',
'aritmetica': r'^(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29})([\*\/\+\-\^](\d+|\d+\.\d+|[a-zA-Z_]\w{0,29}))+$',
'relacional': r'^(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29})(([\<\>]\=?|[\!\=]=)(\d+|\d+\.\d+|[a-zA-Z_]\w{0,29}))+$'
}
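# Example doc.txt lines and the category each would match (illustrative):
#   42        -> entero
#   -3.14     -> flotante
#   total_1   -> variable
#   "hola"    -> string
#   a+b*2     -> aritmetica
#   x>=10     -> relacional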
try:
with open('doc.txt', encoding='utf-8') as file:
for line in file:
flag = False
for regexName, regex in RegexPatterns.items():
foundRegex = re.findall(regex, line)
if line != '\n':
if foundRegex != []:
flag = True
                        print('{}: is {}'.format(line[0:len(line)-1], regexName))
break
if not flag and line != '\n':
                print('{}: unrecognized'.format(line[0:len(line)-1]))
except Exception as e:
    print('Error opening the file: {}'.format(e)) | [
"="
] | = |
33b9a0b28178626117cfa52bbee000bdf746fae2 | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /QcswPnY2cAbrfwuWE_1.py | 3dc657a83394c0074459ebb833c7727b69c41094 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 173 | py |
def filter_factorials(n):
def is_fact(x):
i=1
while(True):
if x%i<1:x//=i
else:break
i+=1
return x==1
return[i for i in n if is_fact(i)]
| [
"[email protected]"
] | |
4f8410227745a3f05d1ded00c637145222b001f5 | acf03baf55e36dc2a9a5ecd697ec5cb15446ae32 | /vng_api_common/decorators.py | eeb5d658ccfe42cff1208a7c375b34363017ad85 | [] | no_license | GemeenteUtrecht/vng-api-common | b6eb55121dc44c72179cbcd63896bbc020dc3975 | 97786cac041d867468007f9b9a9703d21644391a | refs/heads/master | 2021-06-29T01:53:11.647065 | 2019-07-18T12:04:39 | 2019-07-18T12:05:27 | 198,412,309 | 0 | 0 | null | 2019-07-23T10:57:53 | 2019-07-23T10:57:52 | null | UTF-8 | Python | false | false | 279 | py | from typing import Any
from django.db.models.base import ModelBase
def field_default(field: str, default: Any):
def decorator(cls: ModelBase):
model_field = cls._meta.get_field(field)
model_field.default = default
return cls
return decorator
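# Hypothetical usage sketch (the model and field names are illustrative,
# not taken from this package):
#
# @field_default('status', 'draft')
# class Document(models.Model):
#     status = models.CharField(max_length=20, default='final')
#
# Document._meta.get_field('status').default  # -> 'draft'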
| [
"[email protected]"
] | |
d52897974dca71896f8010946a8b51fba5aaf253 | 7d78a18fcb8f34cc84e9439bd19cf491e3e0ec49 | /Code/Particle_Identification/msc-hpc/OLD/OLD/round1/feedforward_python/model/model1/feed_forward_1_dropout_window_sum.py | f0f09278e5686de8562c923232116972c2fce202 | [] | no_license | PsycheShaman/MSc-thesis | 62767951b67b922ce5a21cad5bdb258998b7d2ea | 34504499df64c7d6cc7c89af9618cd58d6378e8e | refs/heads/master | 2022-03-12T07:17:57.309357 | 2019-12-10T21:17:39 | 2019-12-10T21:17:39 | 151,471,442 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,372 | py | print("==============================================================================================")
print("starting........................................................................................")
import glob
import numpy as np
print("imported glob, np........................................................................................")
x_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/ff/x_*.pkl")
y_files = glob.glob("/scratch/vljchr004/data/msc-thesis-data/ff/y_*.pkl")
#x_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\ff\\x_*.pkl")
#y_files = glob.glob("C:\\Users\\gerhard\\Documents\\msc-thesis-data\\ff\\y_*.pkl")
import pickle
print("loading first x pickle........................................................................................")
with open(x_files[0], 'rb') as x_file0:
x = pickle.load(x_file0)
print("loading first y pickle........................................................................................")
with open(y_files[0], 'rb') as y_file0:
y = pickle.load(y_file0)
print("recursively adding x pickles........................................................................................")
for i in x_files[1:]:
#for i in x_files[1:2]:
with open(i,'rb') as x_file:
xi = pickle.load(x_file)
x = np.concatenate((x,xi),axis=0)
print("recursively adding y pickles........................................................................................")
for i in y_files[1:]:
#for i in y_files[1:2]:
with open(i,'rb') as y_file:
yi = pickle.load(y_file)
y = np.concatenate((y,yi),axis=None)
nz = np.array([np.count_nonzero(i) for i in x])
zeros = np.where(nz==0)
x = np.delete(x,zeros,axis=0)
y = np.delete(y,zeros)
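# Drop feature rows that contain only zeros (no recorded signal).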
#oversample electrons
elec = np.where(y==1)
pion = np.where(y!=1)
electrons_x = x[elec,:]
electrons_y = y[elec]
electrons_x = np.squeeze(electrons_x)
x = np.concatenate((electrons_x,x,electrons_x),axis=0)
y = np.concatenate((electrons_y,y,electrons_y),axis=None)
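# Naive oversampling: the minority electron rows are stacked before and after
# the original data, so each electron example now appears three times.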
mu = np.mean(x)
x = np.true_divide(x,mu)
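# Note: the scale factor is the mean over *all* rows (train and test together),
# so a little information leaks across the split performed below.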
# Append eight 3-bin window sums as extra features. Python slice ends are
# exclusive, so a 3-wide window is i[0:3]; the original slices (0:2, 3:5, ...)
# silently dropped every third bin.
x_add = np.array([np.array((np.sum(i[0:3]), np.sum(i[3:6]), np.sum(i[6:9]),
                            np.sum(i[9:12]), np.sum(i[12:15]), np.sum(i[15:18]),
                            np.sum(i[18:21]), np.sum(i[21:24]))) for i in x])
x = np.hstack((x, x_add))
from tensorflow.keras.utils import to_categorical
#y = to_categorical(y)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2,random_state=123456)
import tensorflow
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation
num_classes = 2
epochs = 100
y_train = tensorflow.keras.utils.to_categorical(y_train, num_classes)
y_test = tensorflow.keras.utils.to_categorical(y_test, num_classes)
model1_dropout_0_5 = Sequential([
Dense(256, input_shape=(32,)),
Activation('relu'),
Dropout(0.5),
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(64),
Activation('relu'),
Dropout(0.5),
Dense(2),
Activation('softmax')
])
model1_dropout_0_5.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
history = model1_dropout_0_5.fit(x_train, y_train,
#batch_size=batch_size,
epochs=epochs,
validation_split=0.15,
shuffle=True,
verbose=2)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_5_acc_history.png', bbox_inches='tight')  # plt.show() is a no-op on a headless batch node; file name chosen here
plt.close()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_5_history.png', bbox_inches='tight')
model1_dropout_0_5.probs = model1_dropout_0_5.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/msc-hpc/feedforward_python/results/feed_forward_1__dropout_0_5_results.csv", np.array(model1_dropout_0_5.probs), fmt="%s")
model1_dropout_0_5.save('/home/vljchr004/msc-hpc/feedforward_python/feed_forward_1__dropout_0_5.h5') # creates a HDF5 file 'my_model.h5'
del model1_dropout_0_5
model1_dropout_0_8 = Sequential([
Dense(256, input_shape=(32,)),
Activation('relu'),
Dropout(0.8),
Dense(128),
Activation('relu'),
Dropout(0.8),
Dense(128),
Activation('relu'),
Dropout(0.8),
Dense(64),
Activation('relu'),
Dropout(0.8),
Dense(2),
Activation('softmax')
])
model1_dropout_0_8.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
history = model1_dropout_0_8.fit(x_train, y_train,
#batch_size=batch_size,
epochs=epochs,
validation_split=0.15,
shuffle=True,
verbose=2)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_8_acc_history.png', bbox_inches='tight')  # save instead of plt.show(); file name chosen here
plt.close()  # clear the accuracy curves so the loss plot below does not overlap them
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_8_history.png', bbox_inches='tight')
model1_dropout_0_8.probs = model1_dropout_0_8.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/msc-hpc/feedforward_python/results/feed_forward_1__dropout_0_8_results.csv", np.array(model1_dropout_0_8.probs), fmt="%s")
model1_dropout_0_8.save('/home/vljchr004/msc-hpc/feedforward_python/feed_forward_1__dropout_0_8.h5') # creates a HDF5 file 'my_model.h5'
del model1_dropout_0_8
model1_dropout_0_8_0_5 = Sequential([
Dense(256, input_shape=(32,)),
Activation('relu'),
Dropout(0.8),
Dense(128),
Activation('relu'),
Dropout(0.8),
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(64),
Activation('relu'),
Dropout(0.5),
Dense(2),
Activation('softmax')
])
model1_dropout_0_8_0_5.compile(loss='categorical_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
history = model1_dropout_0_8_0_5.fit(x_train, y_train,
#batch_size=batch_size,
epochs=epochs,
validation_split=0.15,
shuffle=True,
verbose=2)
import matplotlib.pyplot as plt
# summarize history for accuracy
plt.plot(history.history['acc'])
plt.plot(history.history['val_acc'])
plt.title('model accuracy')
plt.ylabel('accuracy')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_8_0_5_acc_history2.png', bbox_inches='tight')  # distinct name: the loss plot below previously overwrote this file
plt.close()
# summarize history for loss
plt.plot(history.history['loss'])
plt.plot(history.history['val_loss'])
plt.title('model loss')
plt.ylabel('loss')
plt.xlabel('epoch')
plt.legend(['train', 'test'], loc='upper left')
plt.savefig('/home/vljchr004/msc-hpc/feedforward_python/fig/feed_forward_1_dropout_0_8_0_5_history2.png', bbox_inches='tight')
model1_dropout_0_8_0_5.probs = model1_dropout_0_8_0_5.predict_proba(x_test)
import numpy as np
np.savetxt("/home/vljchr004/msc-hpc/feedforward_python/results/feed_forward_1__dropout_0_8_0_5_results.csv", np.array(model1_dropout_0_8_0_5.probs), fmt="%s")
model1_dropout_0_8_0_5.save('/home/vljchr004/msc-hpc/feedforward_python/feed_forward_1__dropout_0_8_0_5.h5') # creates a HDF5 file 'my_model.h5'
del model1_dropout_0_8_0_5
| [
"[email protected]"
] | |
07e7f05c86afb0643a2049ea590bf26c009ad663 | ec6f8a634c607e65300bf9812c79dbf780c351d0 | /raspberrypi_files/field4off.py | 6f2ec758df0e1d77b337b75f96e84b9be40755e0 | [] | no_license | prateek-chouhan05/Home-automatation-system | ccbcd8edaba74ac5564c49187517a2991b4163db | aafc3eebe45bdc075f04403b63e44e67ab0c2bc7 | refs/heads/master | 2023-07-09T13:56:28.028748 | 2020-11-09T06:29:13 | 2020-11-09T06:29:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(23, GPIO.OUT)
GPIO.output(23, GPIO.HIGH)
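# Assumption from the file name: pin 23 drives an active-low relay, so HIGH
# de-energizes it and switches field 4 off; invert for active-high wiring.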
| [
"[email protected]"
] |